2024-12-10 11:01:08,921 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-10 11:01:08,937 main DEBUG Took 0.013309 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-10 11:01:08,937 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-10 11:01:08,938 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-10 11:01:08,939 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-10 11:01:08,940 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,971 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-10 11:01:08,985 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,986 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,987 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,987 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,988 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,988 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,989 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,989 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,989 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,990 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,990 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,991 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,991 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,992 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,992 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,992 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,993 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,993 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,994 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,994 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,995 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,995 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,995 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 11:01:08,996 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,996 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-10 11:01:08,998 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 11:01:08,999 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-10 11:01:09,001 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-10 11:01:09,001 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-10 11:01:09,002 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-10 11:01:09,003 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-10 11:01:09,013 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-10 11:01:09,016 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-10 11:01:09,018 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-10 11:01:09,018 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-10 11:01:09,019 main DEBUG createAppenders(={Console})
2024-12-10 11:01:09,020 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-12-10 11:01:09,020 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-10 11:01:09,020 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-12-10 11:01:09,021 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-10 11:01:09,022 main DEBUG OutputStream closed
2024-12-10 11:01:09,022 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-10 11:01:09,022 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-10 11:01:09,023 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-12-10 11:01:09,110 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-10 11:01:09,112 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-10 11:01:09,113 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-10 11:01:09,114 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-10 11:01:09,115 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-10 11:01:09,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-10 11:01:09,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-10 11:01:09,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-10 11:01:09,117 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-10 11:01:09,117 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-10 11:01:09,118 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-10 11:01:09,119 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-10 11:01:09,119 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-10 11:01:09,119 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-10 11:01:09,120 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-10 11:01:09,120 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-10 11:01:09,121 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-10 11:01:09,121 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-10 11:01:09,124 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-10 11:01:09,125 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-12-10 11:01:09,125 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-10 11:01:09,126 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-12-10T11:01:09,143 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-12-10 11:01:09,146 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-10 11:01:09,147 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-10T11:01:09,450 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19
2024-12-10T11:01:09,484 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba, deleteOnExit=true
2024-12-10T11:01:09,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/test.cache.data in system properties and HBase conf
2024-12-10T11:01:09,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.tmp.dir in system properties and HBase conf
2024-12-10T11:01:09,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir in system properties and HBase conf
2024-12-10T11:01:09,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-10T11:01:09,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-10T11:01:09,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-10T11:01:09,622 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-10T11:01:09,765 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-10T11:01:09,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-10T11:01:09,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-10T11:01:09,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-10T11:01:09,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-10T11:01:09,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-10T11:01:09,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-10T11:01:09,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-10T11:01:09,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-10T11:01:09,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-10T11:01:09,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/nfs.dump.dir in system properties and HBase conf
2024-12-10T11:01:09,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/java.io.tmpdir in system properties and HBase conf
2024-12-10T11:01:09,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-10T11:01:09,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-10T11:01:09,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-10T11:01:10,798 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-10T11:01:10,914 INFO [Time-limited test {}] log.Log(170): Logging initialized @2816ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-10T11:01:11,004 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T11:01:11,092 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T11:01:11,123 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T11:01:11,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T11:01:11,126 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-10T11:01:11,145 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T11:01:11,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir/,AVAILABLE}
2024-12-10T11:01:11,152 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T11:01:11,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/java.io.tmpdir/jetty-localhost-38821-hadoop-hdfs-3_4_1-tests_jar-_-any-7750362353234553134/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T11:01:11,402 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:38821}
2024-12-10T11:01:11,402 INFO [Time-limited test {}] server.Server(415): Started @3306ms
2024-12-10T11:01:11,845 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T11:01:11,852 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T11:01:11,854 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T11:01:11,855 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T11:01:11,855 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-10T11:01:11,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir/,AVAILABLE}
2024-12-10T11:01:11,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T11:01:11,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/java.io.tmpdir/jetty-localhost-40661-hadoop-hdfs-3_4_1-tests_jar-_-any-14651344238527221203/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T11:01:11,996 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:40661}
2024-12-10T11:01:11,996 INFO [Time-limited test {}] server.Server(415): Started @3900ms
2024-12-10T11:01:12,065 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-10T11:01:12,201 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T11:01:12,208 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T11:01:12,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T11:01:12,210 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T11:01:12,210 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-10T11:01:12,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir/,AVAILABLE}
2024-12-10T11:01:12,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T11:01:12,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/java.io.tmpdir/jetty-localhost-34085-hadoop-hdfs-3_4_1-tests_jar-_-any-11976395196592716175/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T11:01:12,353 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:34085}
2024-12-10T11:01:12,353 INFO [Time-limited test {}] server.Server(415): Started @4257ms
2024-12-10T11:01:12,356 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-10T11:01:12,446 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T11:01:12,452 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T11:01:12,462 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T11:01:12,462 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T11:01:12,463 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-10T11:01:12,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir/,AVAILABLE}
2024-12-10T11:01:12,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T11:01:12,585 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data2/current/BP-475784458-172.17.0.2-1733828470508/current, will proceed with Du for space computation calculation,
2024-12-10T11:01:12,585 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data1/current/BP-475784458-172.17.0.2-1733828470508/current, will proceed with Du for space computation calculation,
2024-12-10T11:01:12,585 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data4/current/BP-475784458-172.17.0.2-1733828470508/current, will proceed with Du for space computation calculation,
2024-12-10T11:01:12,585 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data3/current/BP-475784458-172.17.0.2-1733828470508/current, will proceed with Du for space computation calculation,
2024-12-10T11:01:12,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/java.io.tmpdir/jetty-localhost-45997-hadoop-hdfs-3_4_1-tests_jar-_-any-6956594859799490353/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T11:01:12,623 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:45997}
2024-12-10T11:01:12,624 INFO [Time-limited test {}] server.Server(415): Started @4527ms
2024-12-10T11:01:12,626 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-10T11:01:12,655 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-10T11:01:12,657 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-10T11:01:12,736 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x497fe98cd987fda2 with lease ID 0xd17ad3ac63812a59: Processing first storage report for DS-b2dc6677-cfd7-4f1d-af38-cb0e6c1d0092 from datanode DatanodeRegistration(127.0.0.1:35865, datanodeUuid=a9b8a64d-fbbd-4134-9b04-4d7295a4be94, infoPort=36193, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508)
2024-12-10T11:01:12,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x497fe98cd987fda2 with lease ID 0xd17ad3ac63812a59: from storage DS-b2dc6677-cfd7-4f1d-af38-cb0e6c1d0092 node DatanodeRegistration(127.0.0.1:35865, datanodeUuid=a9b8a64d-fbbd-4134-9b04-4d7295a4be94, infoPort=36193, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-10T11:01:12,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0824ca8738a93e6 with lease ID 0xd17ad3ac63812a58: Processing first storage report for DS-ac58aeb1-6a7b-4e3b-8f47-f258004c4d11 from datanode DatanodeRegistration(127.0.0.1:46195, datanodeUuid=0bf4b057-e912-466f-be95-ded1180f5da9, infoPort=41617, infoSecurePort=0, ipcPort=35887, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508)
2024-12-10T11:01:12,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0824ca8738a93e6 with lease ID 0xd17ad3ac63812a58: from storage DS-ac58aeb1-6a7b-4e3b-8f47-f258004c4d11 node DatanodeRegistration(127.0.0.1:46195, datanodeUuid=0bf4b057-e912-466f-be95-ded1180f5da9, infoPort=41617, infoSecurePort=0, ipcPort=35887, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-10T11:01:12,739 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x497fe98cd987fda2 with lease ID 0xd17ad3ac63812a59: Processing first storage report for DS-a4a24cc2-e30c-43d1-a64c-afb099156fde from datanode DatanodeRegistration(127.0.0.1:35865, datanodeUuid=a9b8a64d-fbbd-4134-9b04-4d7295a4be94, infoPort=36193, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508)
2024-12-10T11:01:12,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x497fe98cd987fda2 with lease ID 0xd17ad3ac63812a59: from storage DS-a4a24cc2-e30c-43d1-a64c-afb099156fde node DatanodeRegistration(127.0.0.1:35865, datanodeUuid=a9b8a64d-fbbd-4134-9b04-4d7295a4be94, infoPort=36193, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-10T11:01:12,740 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0824ca8738a93e6 with lease ID 0xd17ad3ac63812a58: Processing first storage report for DS-406f8daf-9aa3-40f4-9716-0d54802a51ab from datanode DatanodeRegistration(127.0.0.1:46195, datanodeUuid=0bf4b057-e912-466f-be95-ded1180f5da9, infoPort=41617, infoSecurePort=0, ipcPort=35887, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508)
2024-12-10T11:01:12,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0824ca8738a93e6 with lease ID 0xd17ad3ac63812a58: from storage DS-406f8daf-9aa3-40f4-9716-0d54802a51ab node DatanodeRegistration(127.0.0.1:46195, datanodeUuid=0bf4b057-e912-466f-be95-ded1180f5da9, infoPort=41617, infoSecurePort=0, ipcPort=35887, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-10T11:01:12,838 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data5/current/BP-475784458-172.17.0.2-1733828470508/current, will proceed with Du for space computation calculation,
2024-12-10T11:01:12,838 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data6/current/BP-475784458-172.17.0.2-1733828470508/current, will proceed with Du for space computation calculation,
2024-12-10T11:01:12,874 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-10T11:01:12,881 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8883e95c4b87f4d5 with lease ID 0xd17ad3ac63812a5a: Processing first storage report for DS-312ac9b6-5268-45dc-874f-eb5809300bbf from datanode DatanodeRegistration(127.0.0.1:33677, datanodeUuid=4ae71281-d36b-49c9-a42e-a3f675ff9a7b, infoPort=37141, infoSecurePort=0, ipcPort=44027, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508)
2024-12-10T11:01:12,881 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8883e95c4b87f4d5 with lease ID 0xd17ad3ac63812a5a: from storage DS-312ac9b6-5268-45dc-874f-eb5809300bbf node DatanodeRegistration(127.0.0.1:33677, datanodeUuid=4ae71281-d36b-49c9-a42e-a3f675ff9a7b, infoPort=37141, infoSecurePort=0, ipcPort=44027, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-10T11:01:12,882 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8883e95c4b87f4d5 with lease ID 0xd17ad3ac63812a5a: Processing first storage report for DS-f030e87a-1d8d-4164-9842-0a39cd4251a9 from datanode DatanodeRegistration(127.0.0.1:33677, datanodeUuid=4ae71281-d36b-49c9-a42e-a3f675ff9a7b, infoPort=37141, infoSecurePort=0, ipcPort=44027, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508)
2024-12-10T11:01:12,882 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8883e95c4b87f4d5 with lease ID 0xd17ad3ac63812a5a: from storage DS-f030e87a-1d8d-4164-9842-0a39cd4251a9 node DatanodeRegistration(127.0.0.1:33677, datanodeUuid=4ae71281-d36b-49c9-a42e-a3f675ff9a7b, infoPort=37141, infoSecurePort=0, ipcPort=44027, storageInfo=lv=-57;cid=testClusterID;nsid=848606317;c=1733828470508), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-10T11:01:13,076 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19
2024-12-10T11:01:13,166 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-12-10T11:01:13,241 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=158, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=1014, ProcessCount=11, AvailableMemoryMB=4804
2024-12-10T11:01:13,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-10T11:01:13,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-10T11:01:13,363 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/zookeeper_0, clientPort=52356, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-10T11:01:13,378 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52356
2024-12-10T11:01:13,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:01:13,392 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:01:13,499 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-10T11:01:13,499 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-10T11:01:13,558 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:45124 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:46195:DataXceiver error processing WRITE_BLOCK operation  src: /127.0.0.1:45124 dst: /127.0.0.1:46195
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T11:01:13,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-10T11:01:13,981 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-10T11:01:13,991 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a with version=8
2024-12-10T11:01:13,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/hbase-staging
2024-12-10T11:01:14,096 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-10T11:01:14,384 INFO [Time-limited test {}] client.ConnectionUtils(128): master/944a6b9062fa:0 server-side Connection retries=45
2024-12-10T11:01:14,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T11:01:14,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T11:01:14,401 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T11:01:14,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T11:01:14,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T11:01:14,565 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-10T11:01:14,631 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-10T11:01:14,641 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-10T11:01:14,645 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-10T11:01:14,674 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 29999 (auto-detected)
2024-12-10T11:01:14,676 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-10T11:01:14,695 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39553
2024-12-10T11:01:14,719 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39553 connecting to ZooKeeper ensemble=127.0.0.1:52356
2024-12-10T11:01:14,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395530x0, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-10T11:01:14,756 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39553-0x10176cde6a20000 connected
2024-12-10T11:01:14,789 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:01:14,792 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:01:14,803 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T11:01:14,807 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a, hbase.cluster.distributed=false
2024-12-10T11:01:14,832 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-10T11:01:14,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39553
2024-12-10T11:01:14,841 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39553
2024-12-10T11:01:14,842 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39553
2024-12-10T11:01:14,843 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39553
2024-12-10T11:01:14,843 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39553
2024-12-10T11:01:14,972 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/944a6b9062fa:0 server-side Connection retries=45
2024-12-10T11:01:14,974 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T11:01:14,974 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T11:01:14,974 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T11:01:14,975 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T11:01:14,975 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T11:01:14,978 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-10T11:01:14,981 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-10T11:01:14,982 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33263
2024-12-10T11:01:14,984 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33263 connecting to ZooKeeper ensemble=127.0.0.1:52356
2024-12-10T11:01:14,986 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:01:14,992 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:01:15,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:332630x0, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-10T11:01:15,004 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:332630x0, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T11:01:15,009 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-10T11:01:15,014 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33263-0x10176cde6a20001 connected
2024-12-10T11:01:15,019 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-10T11:01:15,022 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-10T11:01:15,028 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-10T11:01:15,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33263
2024-12-10T11:01:15,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33263
2024-12-10T11:01:15,033 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33263
2024-12-10T11:01:15,035 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33263
2024-12-10T11:01:15,036 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33263
2024-12-10T11:01:15,055 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/944a6b9062fa:0 server-side Connection retries=45
2024-12-10T11:01:15,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T11:01:15,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T11:01:15,057 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T11:01:15,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T11:01:15,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T11:01:15,058 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-10T11:01:15,058 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-10T11:01:15,059 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45813
2024-12-10T11:01:15,062 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45813 connecting to ZooKeeper ensemble=127.0.0.1:52356
2024-12-10T11:01:15,063 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:01:15,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:01:15,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458130x0, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-10T11:01:15,078 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:458130x0, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T11:01:15,078 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-10T11:01:15,081 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45813-0x10176cde6a20002 connected
2024-12-10T11:01:15,088 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:01:15,089 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:01:15,092 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:01:15,097 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45813 2024-12-10T11:01:15,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45813 2024-12-10T11:01:15,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45813 2024-12-10T11:01:15,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45813 2024-12-10T11:01:15,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45813 2024-12-10T11:01:15,122 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/944a6b9062fa:0 server-side Connection retries=45 2024-12-10T11:01:15,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:15,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:15,122 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:01:15,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:15,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:01:15,123 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:01:15,123 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:01:15,124 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35357 2024-12-10T11:01:15,125 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35357 connecting to ZooKeeper ensemble=127.0.0.1:52356 2024-12-10T11:01:15,127 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:15,130 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:15,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:353570x0, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:01:15,136 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35357-0x10176cde6a20003 connected 2024-12-10T11:01:15,137 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:15,137 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T11:01:15,141 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:01:15,142 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:01:15,144 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:01:15,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35357 2024-12-10T11:01:15,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35357 2024-12-10T11:01:15,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35357 2024-12-10T11:01:15,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35357 2024-12-10T11:01:15,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35357 2024-12-10T11:01:15,179 DEBUG [M:0;944a6b9062fa:39553 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;944a6b9062fa:39553 2024-12-10T11:01:15,180 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/944a6b9062fa,39553,1733828474155 2024-12-10T11:01:15,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:15,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:15,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
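For context on the ZKWatcher/ZKUtil lines above (watchers set on /hbase/running, /hbase/master and /hbase/acl, plus NodeChildrenChanged events on /hbase/backup-masters): the sketch below shows the same one-shot watch pattern with a plain org.apache.zookeeper.ZooKeeper client. The quorum address is illustrative and the getChildren call assumes /hbase/backup-masters already exists; exists() deliberately works even when the watched znode does not, which is what "Set watcher on znode that does not yet exist" refers to.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      // Event types match the log: NodeCreated, NodeDeleted, NodeChildrenChanged.
      System.out.println("zk event " + event.getType() + " on " + event.getPath());
    });
    connected.await();
    zk.exists("/hbase/master", true);              // one-shot watch; the znode may not exist yet
    zk.getChildren("/hbase/backup-masters", true); // fires NodeChildrenChanged on add/remove
    Thread.sleep(60_000);                          // keep the session open long enough to see events
    zk.close();
  }
}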
2024-12-10T11:01:15,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:15,190 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/944a6b9062fa,39553,1733828474155 2024-12-10T11:01:15,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:01:15,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:01:15,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:01:15,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:15,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:15,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:15,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:15,214 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T11:01:15,216 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/944a6b9062fa,39553,1733828474155 from backup master directory 2024-12-10T11:01:15,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:15,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/944a6b9062fa,39553,1733828474155 2024-12-10T11:01:15,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-10T11:01:15,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:15,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:15,220 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:01:15,220 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=944a6b9062fa,39553,1733828474155 2024-12-10T11:01:15,222 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-10T11:01:15,224 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-10T11:01:15,298 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/hbase.id] with ID: 0bc51a23-a87d-44cc-83bc-2b18235d30a0 2024-12-10T11:01:15,299 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/.tmp/hbase.id 2024-12-10T11:01:15,309 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:15,309 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:15,313 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:41430 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:35865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41430 dst: /127.0.0.1:35865 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:15,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-10T11:01:15,325 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:15,326 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/.tmp/hbase.id]:[hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/hbase.id] 2024-12-10T11:01:15,374 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:15,378 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-10T11:01:15,402 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 22ms. 2024-12-10T11:01:15,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:15,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:15,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:15,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:15,423 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:15,424 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:15,430 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:41452 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:35865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41452 dst: /127.0.0.1:35865 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:15,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-10T11:01:15,440 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
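The repeated DFSStripedOutputStream warnings and DataXceiver "Premature EOF" errors above come from writing small files under the RS-3-2-1024k erasure coding policy on a mini cluster with fewer datanodes than the policy needs; the log itself points at 'hdfs ec -verifyClusterSetup' (the related hdfs ec subcommands such as -getPolicy and -unsetPolicy are also relevant) as the way to check this. Below is a hedged Java equivalent using the DistributedFileSystem erasure-coding API; the NameNode URI and path mirror the test paths above but are otherwise illustrative, and unsetErasureCodingPolicy only applies where a policy was explicitly set.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40209"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/user/jenkins/test-data");
      // null means the directory uses plain replication rather than striping
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      System.out.println("EC policy on " + dir + ": " + policy);
      if (policy != null) {
        // Dropping back to replication avoids "Cannot allocate parity block"
        // when the cluster has fewer datanodes than the policy's data+parity width.
        dfs.unsetErasureCodingPolicy(dir);
      }
    }
  }
}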
2024-12-10T11:01:15,458 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T11:01:15,460 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-10T11:01:15,466 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:01:15,497 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:15,498 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:15,502 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:51276 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:33677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51276 dst: /127.0.0.1:33677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:15,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-10T11:01:15,510 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:15,529 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store 2024-12-10T11:01:15,544 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:15,545 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:15,553 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:41480 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41480 dst: /127.0.0.1:35865 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:15,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-10T11:01:15,558 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:15,563 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-10T11:01:15,566 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:15,567 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T11:01:15,567 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:15,568 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:15,569 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-10T11:01:15,569 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:15,569 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:15,570 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733828475567Disabling compacts and flushes for region at 1733828475567Disabling writes for close at 1733828475569 (+2 ms)Writing region close event to WAL at 1733828475569Closed at 1733828475569 2024-12-10T11:01:15,572 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/.initializing 2024-12-10T11:01:15,572 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/WALs/944a6b9062fa,39553,1733828474155 2024-12-10T11:01:15,581 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T11:01:15,599 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C39553%2C1733828474155, suffix=, logDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/WALs/944a6b9062fa,39553,1733828474155, archiveDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/oldWALs, maxLogs=10 2024-12-10T11:01:15,639 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/WALs/944a6b9062fa,39553,1733828474155/944a6b9062fa%2C39553%2C1733828474155.1733828475604, exclude list is [], retry=0 2024-12-10T11:01:15,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:15,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46195,DS-ac58aeb1-6a7b-4e3b-8f47-f258004c4d11,DISK] 2024-12-10T11:01:15,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33677,DS-312ac9b6-5268-45dc-874f-eb5809300bbf,DISK] 2024-12-10T11:01:15,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35865,DS-b2dc6677-cfd7-4f1d-af38-cb0e6c1d0092,DISK] 2024-12-10T11:01:15,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-10T11:01:15,717 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/WALs/944a6b9062fa,39553,1733828474155/944a6b9062fa%2C39553%2C1733828474155.1733828475604 2024-12-10T11:01:15,718 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36193:36193),(127.0.0.1/127.0.0.1:41617:41617),(127.0.0.1/127.0.0.1:37141:37141)] 2024-12-10T11:01:15,718 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:01:15,719 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:15,722 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,723 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,770 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T11:01:15,803 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:15,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:15,807 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T11:01:15,811 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:15,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:01:15,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,815 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T11:01:15,816 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:15,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:01:15,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T11:01:15,820 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:15,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:01:15,821 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,825 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,827 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,834 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,834 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,839 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T11:01:15,843 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:15,860 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:01:15,861 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65163312, jitterRate=-0.028990983963012695}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:01:15,869 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733828475737Initializing all the Stores at 1733828475739 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828475740 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828475741 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828475741Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828475741Cleaning up temporary data from old regions at 1733828475834 (+93 ms)Region opened successfully at 1733828475868 (+34 ms) 2024-12-10T11:01:15,870 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T11:01:15,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-10T11:01:15,918 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-10T11:01:15,921 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19d80097, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=944a6b9062fa/172.17.0.2:0 2024-12-10T11:01:15,959 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-10T11:01:15,972 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T11:01:15,973 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T11:01:15,976 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T11:01:15,977 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-10T11:01:15,983 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-10T11:01:15,984 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T11:01:16,018 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
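The open journal above reports master:store coming up with SteppingSplitPolicy (the logged desiredMaxFileSize=65163312 is roughly a 64 MB base size with the reported jitterRate of about -0.029 applied) and a flushSize of 134217728. Purely as an illustration, the snippet below shows the stock configuration keys that control those values for this test-sized setup; the numbers mirror what this run logs and are not production defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SplitAndFlushSettingsSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Split policy named in the open journal above.
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
    // 64 MB base region file size; the logged desiredMaxFileSize is this value after jitter.
    conf.setLong("hbase.hregion.max.filesize", 64L * 1024 * 1024);
    // 128 MB memstore flush size, matching flushSize=134217728 above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    return conf;
  }
}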
2024-12-10T11:01:16,030 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T11:01:16,033 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-10T11:01:16,036 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T11:01:16,038 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T11:01:16,040 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-10T11:01:16,043 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T11:01:16,048 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T11:01:16,049 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-10T11:01:16,051 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T11:01:16,053 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T11:01:16,077 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T11:01:16,079 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T11:01:16,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:16,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:16,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:16,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-10T11:01:16,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:16,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,089 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=944a6b9062fa,39553,1733828474155, sessionid=0x10176cde6a20000, setting cluster-up flag (Was=false) 2024-12-10T11:01:16,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,114 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T11:01:16,117 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=944a6b9062fa,39553,1733828474155 2024-12-10T11:01:16,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,130 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T11:01:16,133 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=944a6b9062fa,39553,1733828474155 2024-12-10T11:01:16,143 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-10T11:01:16,163 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(746): ClusterId : 0bc51a23-a87d-44cc-83bc-2b18235d30a0 2024-12-10T11:01:16,164 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(746): ClusterId : 0bc51a23-a87d-44cc-83bc-2b18235d30a0 2024-12-10T11:01:16,164 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(746): ClusterId : 0bc51a23-a87d-44cc-83bc-2b18235d30a0 2024-12-10T11:01:16,167 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:01:16,167 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:01:16,167 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:01:16,173 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:01:16,173 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:01:16,173 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:01:16,173 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:01:16,173 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:01:16,174 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:01:16,177 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:01:16,177 DEBUG [RS:2;944a6b9062fa:35357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fb3d6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=944a6b9062fa/172.17.0.2:0 2024-12-10T11:01:16,181 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:01:16,181 DEBUG [RS:1;944a6b9062fa:45813 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c0c067e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=944a6b9062fa/172.17.0.2:0 2024-12-10T11:01:16,182 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:01:16,182 DEBUG [RS:0;944a6b9062fa:33263 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c8a3696, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=944a6b9062fa/172.17.0.2:0 2024-12-10T11:01:16,205 DEBUG [RS:0;944a6b9062fa:33263 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;944a6b9062fa:33263 2024-12-10T11:01:16,206 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;944a6b9062fa:35357 2024-12-10T11:01:16,211 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:01:16,211 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:01:16,212 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:01:16,212 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:01:16,212 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T11:01:16,212 DEBUG [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T11:01:16,215 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(2659): reportForDuty to master=944a6b9062fa,39553,1733828474155 with port=33263, startcode=1733828474924 2024-12-10T11:01:16,215 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(2659): reportForDuty to master=944a6b9062fa,39553,1733828474155 with port=35357, startcode=1733828475121 2024-12-10T11:01:16,218 DEBUG [RS:1;944a6b9062fa:45813 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;944a6b9062fa:45813 2024-12-10T11:01:16,219 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:01:16,219 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:01:16,219 DEBUG [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-10T11:01:16,220 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(2659): reportForDuty to master=944a6b9062fa,39553,1733828474155 with port=45813, startcode=1733828475055 2024-12-10T11:01:16,230 DEBUG [RS:0;944a6b9062fa:33263 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:01:16,230 DEBUG [RS:1;944a6b9062fa:45813 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:01:16,230 DEBUG [RS:2;944a6b9062fa:35357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:01:16,242 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-10T11:01:16,253 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-10T11:01:16,263 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-10T11:01:16,270 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 944a6b9062fa,39553,1733828474155 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T11:01:16,276 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36455, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:01:16,276 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47757, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:01:16,276 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38889, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:01:16,283 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-10T11:01:16,285 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/944a6b9062fa:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:01:16,285 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/944a6b9062fa:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:01:16,285 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/944a6b9062fa:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:01:16,286 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/944a6b9062fa:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:01:16,286 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/944a6b9062fa:0, corePoolSize=10, maxPoolSize=10 2024-12-10T11:01:16,286 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,286 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/944a6b9062fa:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:01:16,286 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,288 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-10T11:01:16,289 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-10T11:01:16,292 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733828506292 2024-12-10T11:01:16,294 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T11:01:16,296 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T11:01:16,298 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:01:16,299 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-10T11:01:16,300 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T11:01:16,300 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T11:01:16,301 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T11:01:16,301 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T11:01:16,306 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:16,306 
INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T11:01:16,310 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,315 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T11:01:16,316 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T11:01:16,317 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T11:01:16,321 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T11:01:16,322 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T11:01:16,322 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:16,323 DEBUG [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-10T11:01:16,323 DEBUG [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-10T11:01:16,323 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-10T11:01:16,323 WARN [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-10T11:01:16,323 WARN [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-10T11:01:16,323 WARN [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-10T11:01:16,323 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:16,325 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.large.0-1733828476323,5,FailOnTimeoutGroup] 2024-12-10T11:01:16,333 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.small.0-1733828476325,5,FailOnTimeoutGroup] 2024-12-10T11:01:16,333 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,333 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T11:01:16,335 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,335 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,340 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:51312 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:33677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51312 dst: /127.0.0.1:33677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T11:01:16,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-10T11:01:16,353 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:16,354 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-10T11:01:16,354 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a 2024-12-10T11:01:16,362 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:16,362 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:16,366 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:41528 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:35865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41528 dst: /127.0.0.1:35865 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:16,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-10T11:01:16,372 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:16,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:16,376 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T11:01:16,378 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T11:01:16,378 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:16,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:16,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family ns of region 1588230740 2024-12-10T11:01:16,381 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T11:01:16,381 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:16,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:16,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T11:01:16,385 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T11:01:16,385 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:16,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:16,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T11:01:16,389 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T11:01:16,389 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:16,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:16,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T11:01:16,391 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740 2024-12-10T11:01:16,392 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740 2024-12-10T11:01:16,395 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T11:01:16,395 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T11:01:16,396 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-10T11:01:16,398 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T11:01:16,408 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:01:16,409 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61853933, jitterRate=-0.07830457389354706}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:01:16,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733828476374Initializing all the Stores at 1733828476375 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828476375Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828476375Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828476376 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828476376Cleaning up temporary data from old regions at 1733828476395 (+19 ms)Region opened successfully at 1733828476413 (+18 ms) 2024-12-10T11:01:16,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T11:01:16,414 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T11:01:16,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T11:01:16,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T11:01:16,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T11:01:16,416 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T11:01:16,416 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733828476413Disabling compacts and flushes for region at 1733828476414 (+1 ms)Disabling writes for close at 
1733828476414Writing region close event to WAL at 1733828476415 (+1 ms)Closed at 1733828476416 (+1 ms) 2024-12-10T11:01:16,420 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:01:16,420 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-10T11:01:16,424 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(2659): reportForDuty to master=944a6b9062fa,39553,1733828474155 with port=33263, startcode=1733828474924 2024-12-10T11:01:16,424 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(2659): reportForDuty to master=944a6b9062fa,39553,1733828474155 with port=45813, startcode=1733828475055 2024-12-10T11:01:16,424 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(2659): reportForDuty to master=944a6b9062fa,39553,1733828474155 with port=35357, startcode=1733828475121 2024-12-10T11:01:16,427 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39553 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 944a6b9062fa,45813,1733828475055 2024-12-10T11:01:16,428 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T11:01:16,430 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39553 {}] master.ServerManager(517): Registering regionserver=944a6b9062fa,45813,1733828475055 2024-12-10T11:01:16,437 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T11:01:16,439 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39553 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 944a6b9062fa,35357,1733828475121 2024-12-10T11:01:16,439 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39553 {}] master.ServerManager(517): Registering regionserver=944a6b9062fa,35357,1733828475121 2024-12-10T11:01:16,440 DEBUG [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a 2024-12-10T11:01:16,440 DEBUG [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40209 2024-12-10T11:01:16,440 DEBUG [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:01:16,444 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T11:01:16,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:01:16,444 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39553 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 944a6b9062fa,33263,1733828474924 
2024-12-10T11:01:16,445 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a 2024-12-10T11:01:16,445 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39553 {}] master.ServerManager(517): Registering regionserver=944a6b9062fa,33263,1733828474924 2024-12-10T11:01:16,445 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40209 2024-12-10T11:01:16,445 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:01:16,446 DEBUG [RS:1;944a6b9062fa:45813 {}] zookeeper.ZKUtil(111): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/944a6b9062fa,45813,1733828475055 2024-12-10T11:01:16,446 WARN [RS:1;944a6b9062fa:45813 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:01:16,446 INFO [RS:1;944a6b9062fa:45813 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:01:16,446 DEBUG [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,45813,1733828475055 2024-12-10T11:01:16,449 DEBUG [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a 2024-12-10T11:01:16,450 DEBUG [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40209 2024-12-10T11:01:16,450 DEBUG [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:01:16,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:01:16,453 DEBUG [RS:2;944a6b9062fa:35357 {}] zookeeper.ZKUtil(111): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/944a6b9062fa,35357,1733828475121 2024-12-10T11:01:16,453 WARN [RS:2;944a6b9062fa:35357 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:01:16,454 INFO [RS:2;944a6b9062fa:35357 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:01:16,454 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,35357,1733828475121 2024-12-10T11:01:16,455 DEBUG [RS:0;944a6b9062fa:33263 {}] zookeeper.ZKUtil(111): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/944a6b9062fa,33263,1733828474924 2024-12-10T11:01:16,455 WARN [RS:0;944a6b9062fa:33263 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-10T11:01:16,455 INFO [RS:0;944a6b9062fa:33263 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:01:16,455 DEBUG [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,33263,1733828474924 2024-12-10T11:01:16,456 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [944a6b9062fa,35357,1733828475121] 2024-12-10T11:01:16,456 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [944a6b9062fa,45813,1733828475055] 2024-12-10T11:01:16,458 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [944a6b9062fa,33263,1733828474924] 2024-12-10T11:01:16,479 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:01:16,479 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:01:16,479 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:01:16,495 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:01:16,495 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:01:16,495 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:01:16,502 INFO [RS:1;944a6b9062fa:45813 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:01:16,502 INFO [RS:0;944a6b9062fa:33263 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:01:16,502 INFO [RS:2;944a6b9062fa:35357 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:01:16,502 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,502 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,502 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-10T11:01:16,503 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:01:16,505 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:01:16,505 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:01:16,511 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:01:16,511 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:01:16,511 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:01:16,513 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,513 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,513 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,513 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,513 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,513 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,513 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,513 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,513 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,513 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,513 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,513 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG 
[RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/944a6b9062fa:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:01:16,514 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/944a6b9062fa:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:01:16,514 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/944a6b9062fa:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:01:16,514 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,514 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:16,515 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:0;944a6b9062fa:33263 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:16,515 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:16,515 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:16,515 DEBUG [RS:1;944a6b9062fa:45813 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:16,515 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:16,516 DEBUG [RS:2;944a6b9062fa:35357 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:16,521 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,521 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,522 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,522 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-10T11:01:16,522 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,522 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,33263,1733828474924-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:01:16,524 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,524 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,524 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,524 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,524 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,524 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,525 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,525 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,525 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,525 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,526 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,35357,1733828475121-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:01:16,527 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,45813,1733828475055-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:01:16,547 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:01:16,554 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,33263,1733828474924-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,556 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:01:16,556 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,35357,1733828475121-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,556 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T11:01:16,557 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.Replication(171): 944a6b9062fa,35357,1733828475121 started 2024-12-10T11:01:16,555 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,558 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.Replication(171): 944a6b9062fa,33263,1733828474924 started 2024-12-10T11:01:16,560 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:01:16,561 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,45813,1733828475055-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,561 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,561 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.Replication(171): 944a6b9062fa,45813,1733828475055 started 2024-12-10T11:01:16,583 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,583 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,584 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(1482): Serving as 944a6b9062fa,33263,1733828474924, RpcServer on 944a6b9062fa/172.17.0.2:33263, sessionid=0x10176cde6a20001 2024-12-10T11:01:16,584 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(1482): Serving as 944a6b9062fa,45813,1733828475055, RpcServer on 944a6b9062fa/172.17.0.2:45813, sessionid=0x10176cde6a20002 2024-12-10T11:01:16,584 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:01:16,584 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:01:16,585 DEBUG [RS:0;944a6b9062fa:33263 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 944a6b9062fa,33263,1733828474924 2024-12-10T11:01:16,585 DEBUG [RS:1;944a6b9062fa:45813 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 944a6b9062fa,45813,1733828475055 2024-12-10T11:01:16,585 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,45813,1733828475055' 2024-12-10T11:01:16,585 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,33263,1733828474924' 2024-12-10T11:01:16,585 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:01:16,585 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:01:16,586 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:01:16,586 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:01:16,587 DEBUG 
[RS:1;944a6b9062fa:45813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:01:16,587 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:01:16,587 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:01:16,587 DEBUG [RS:1;944a6b9062fa:45813 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 944a6b9062fa,45813,1733828475055 2024-12-10T11:01:16,587 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,45813,1733828475055' 2024-12-10T11:01:16,587 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:01:16,587 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:01:16,587 DEBUG [RS:0;944a6b9062fa:33263 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 944a6b9062fa,33263,1733828474924 2024-12-10T11:01:16,587 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,33263,1733828474924' 2024-12-10T11:01:16,588 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:01:16,588 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:01:16,588 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:01:16,589 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:16,589 DEBUG [RS:1;944a6b9062fa:45813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:01:16,589 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1482): Serving as 944a6b9062fa,35357,1733828475121, RpcServer on 944a6b9062fa/172.17.0.2:35357, sessionid=0x10176cde6a20003 2024-12-10T11:01:16,589 INFO [RS:1;944a6b9062fa:45813 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:01:16,589 INFO [RS:1;944a6b9062fa:45813 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
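Before declaring the flush-table-proc and online-snapshot members started, each region server checks the abort znode and then lists children of the acquired znode. The following is only an illustrative ZooKeeper-client sketch of that check, reusing the znode paths from the log; the connect string and timeout are placeholders, and this is not the actual ZKProcedureMemberRpcs code:

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureZNodeCheckSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder quorum address; the mini-cluster in this log used 127.0.0.1:52356.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

        // "Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'"
        if (zk.exists("/hbase/flush-table-proc/abort", false) != null) {
            List<String> aborted = zk.getChildren("/hbase/flush-table-proc/abort", false);
            System.out.println("aborted procedures: " + aborted);
        }

        // "Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'"
        if (zk.exists("/hbase/flush-table-proc/acquired", false) != null) {
            List<String> pending = zk.getChildren("/hbase/flush-table-proc/acquired", false);
            System.out.println("procedures to acquire: " + pending);
        }

        zk.close();
    }
}
```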
2024-12-10T11:01:16,589 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:01:16,589 DEBUG [RS:2;944a6b9062fa:35357 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 944a6b9062fa,35357,1733828475121 2024-12-10T11:01:16,589 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,35357,1733828475121' 2024-12-10T11:01:16,589 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:01:16,589 DEBUG [RS:0;944a6b9062fa:33263 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:01:16,590 INFO [RS:0;944a6b9062fa:33263 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:01:16,590 INFO [RS:0;944a6b9062fa:33263 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:01:16,590 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:01:16,591 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:01:16,591 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:01:16,591 DEBUG [RS:2;944a6b9062fa:35357 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 944a6b9062fa,35357,1733828475121 2024-12-10T11:01:16,591 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,35357,1733828475121' 2024-12-10T11:01:16,591 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:01:16,592 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:01:16,593 DEBUG [RS:2;944a6b9062fa:35357 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:01:16,593 INFO [RS:2;944a6b9062fa:35357 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:01:16,593 INFO [RS:2;944a6b9062fa:35357 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:01:16,594 WARN [944a6b9062fa:39553 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
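Quota support is reported as disabled on all three region servers, which matches HBase's default. A hedged sketch of the configuration switch involved, assuming a test-style setup where the Configuration is built by hand:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager only start their
        // managers when quota support is switched on; it is off by default, as in this log.
        conf.setBoolean("hbase.quota.enabled", true);

        System.out.println("hbase.quota.enabled = " + conf.getBoolean("hbase.quota.enabled", false));
    }
}
```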
2024-12-10T11:01:16,695 INFO [RS:2;944a6b9062fa:35357 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T11:01:16,695 INFO [RS:0;944a6b9062fa:33263 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T11:01:16,695 INFO [RS:1;944a6b9062fa:45813 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T11:01:16,698 INFO [RS:2;944a6b9062fa:35357 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C35357%2C1733828475121, suffix=, logDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,35357,1733828475121, archiveDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/oldWALs, maxLogs=32 2024-12-10T11:01:16,698 INFO [RS:0;944a6b9062fa:33263 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C33263%2C1733828474924, suffix=, logDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,33263,1733828474924, archiveDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/oldWALs, maxLogs=32 2024-12-10T11:01:16,698 INFO [RS:1;944a6b9062fa:45813 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C45813%2C1733828475055, suffix=, logDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,45813,1733828475055, archiveDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/oldWALs, maxLogs=32 2024-12-10T11:01:16,718 DEBUG [RS:0;944a6b9062fa:33263 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,33263,1733828474924/944a6b9062fa%2C33263%2C1733828474924.1733828476703, exclude list is [], retry=0 2024-12-10T11:01:16,718 DEBUG [RS:1;944a6b9062fa:45813 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,45813,1733828475055/944a6b9062fa%2C45813%2C1733828475055.1733828476703, exclude list is [], retry=0 2024-12-10T11:01:16,719 DEBUG [RS:2;944a6b9062fa:35357 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,35357,1733828475121/944a6b9062fa%2C35357%2C1733828475121.1733828476702, exclude list is [], retry=0 2024-12-10T11:01:16,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46195,DS-ac58aeb1-6a7b-4e3b-8f47-f258004c4d11,DISK] 2024-12-10T11:01:16,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35865,DS-b2dc6677-cfd7-4f1d-af38-cb0e6c1d0092,DISK] 2024-12-10T11:01:16,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:46195,DS-ac58aeb1-6a7b-4e3b-8f47-f258004c4d11,DISK] 2024-12-10T11:01:16,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33677,DS-312ac9b6-5268-45dc-874f-eb5809300bbf,DISK] 2024-12-10T11:01:16,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33677,DS-312ac9b6-5268-45dc-874f-eb5809300bbf,DISK] 2024-12-10T11:01:16,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35865,DS-b2dc6677-cfd7-4f1d-af38-cb0e6c1d0092,DISK] 2024-12-10T11:01:16,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35865,DS-b2dc6677-cfd7-4f1d-af38-cb0e6c1d0092,DISK] 2024-12-10T11:01:16,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46195,DS-ac58aeb1-6a7b-4e3b-8f47-f258004c4d11,DISK] 2024-12-10T11:01:16,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33677,DS-312ac9b6-5268-45dc-874f-eb5809300bbf,DISK] 2024-12-10T11:01:16,765 INFO [RS:1;944a6b9062fa:45813 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,45813,1733828475055/944a6b9062fa%2C45813%2C1733828475055.1733828476703 2024-12-10T11:01:16,766 INFO [RS:2;944a6b9062fa:35357 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,35357,1733828475121/944a6b9062fa%2C35357%2C1733828475121.1733828476702 2024-12-10T11:01:16,766 INFO [RS:0;944a6b9062fa:33263 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,33263,1733828474924/944a6b9062fa%2C33263%2C1733828474924.1733828476703 2024-12-10T11:01:16,767 DEBUG [RS:1;944a6b9062fa:45813 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36193:36193),(127.0.0.1/127.0.0.1:37141:37141),(127.0.0.1/127.0.0.1:41617:41617)] 2024-12-10T11:01:16,767 DEBUG [RS:2;944a6b9062fa:35357 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36193:36193),(127.0.0.1/127.0.0.1:41617:41617),(127.0.0.1/127.0.0.1:37141:37141)] 2024-12-10T11:01:16,767 DEBUG [RS:0;944a6b9062fa:33263 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41617:41617),(127.0.0.1/127.0.0.1:37141:37141),(127.0.0.1/127.0.0.1:36193:36193)] 2024-12-10T11:01:16,847 DEBUG [944a6b9062fa:39553 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-12-10T11:01:16,859 DEBUG [944a6b9062fa:39553 {}] balancer.BalancerClusterState(204): Hosts are {944a6b9062fa=0} racks are {/default-rack=0} 2024-12-10T11:01:16,867 DEBUG [944a6b9062fa:39553 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T11:01:16,867 DEBUG [944a6b9062fa:39553 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T11:01:16,867 DEBUG [944a6b9062fa:39553 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T11:01:16,867 DEBUG [944a6b9062fa:39553 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T11:01:16,867 DEBUG [944a6b9062fa:39553 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T11:01:16,867 DEBUG [944a6b9062fa:39553 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T11:01:16,867 INFO [944a6b9062fa:39553 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T11:01:16,867 INFO [944a6b9062fa:39553 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T11:01:16,867 INFO [944a6b9062fa:39553 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T11:01:16,867 DEBUG [944a6b9062fa:39553 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T11:01:16,877 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=944a6b9062fa,35357,1733828475121 2024-12-10T11:01:16,885 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 944a6b9062fa,35357,1733828475121, state=OPENING 2024-12-10T11:01:16,891 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T11:01:16,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:16,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:16,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:16,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:16,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:16,897 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T11:01:16,899 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=944a6b9062fa,35357,1733828475121}] 2024-12-10T11:01:17,082 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T11:01:17,085 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39979, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T11:01:17,099 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-10T11:01:17,100 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:01:17,101 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-10T11:01:17,106 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C35357%2C1733828475121.meta, suffix=.meta, logDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,35357,1733828475121, archiveDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/oldWALs, maxLogs=32 2024-12-10T11:01:17,131 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,35357,1733828475121/944a6b9062fa%2C35357%2C1733828475121.meta.1733828477109.meta, exclude list is [], retry=0 2024-12-10T11:01:17,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35865,DS-b2dc6677-cfd7-4f1d-af38-cb0e6c1d0092,DISK] 2024-12-10T11:01:17,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46195,DS-ac58aeb1-6a7b-4e3b-8f47-f258004c4d11,DISK] 2024-12-10T11:01:17,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33677,DS-312ac9b6-5268-45dc-874f-eb5809300bbf,DISK] 2024-12-10T11:01:17,145 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/WALs/944a6b9062fa,35357,1733828475121/944a6b9062fa%2C35357%2C1733828475121.meta.1733828477109.meta 2024-12-10T11:01:17,146 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36193:36193),(127.0.0.1/127.0.0.1:41617:41617),(127.0.0.1/127.0.0.1:37141:37141)] 2024-12-10T11:01:17,146 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:01:17,148 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T11:01:17,175 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T11:01:17,182 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-10T11:01:17,188 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T11:01:17,188 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:17,188 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-10T11:01:17,189 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-10T11:01:17,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T11:01:17,194 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T11:01:17,194 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:17,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:17,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T11:01:17,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T11:01:17,197 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:17,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:17,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T11:01:17,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T11:01:17,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:17,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:17,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T11:01:17,202 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T11:01:17,202 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:17,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:17,203 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T11:01:17,205 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740 2024-12-10T11:01:17,212 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740 2024-12-10T11:01:17,215 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T11:01:17,215 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T11:01:17,216 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
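The CompactionConfiguration lines above record the effective store settings (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, minCompactSize:128 MB), and the FlushLargeStoresPolicy line notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset for hbase:meta, so the policy falls back to memstore-flush-size divided by the number of families (32 MB here). A sketch of how those values map onto the standard configuration keys; treat it as illustrative rather than the test's actual configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Values matching what CompactionConfiguration printed for region 1588230740.
        conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // ratio 1.200000

        // The key named in the FlushLargeStoresPolicy message; unset in this log, hence the
        // fallback to region.getMemStoreFlushHeapSize / number of families.
        String lowerBound = conf.get("hbase.hregion.percolumnfamilyflush.size.lower.bound");
        System.out.println("per-CF flush lower bound: " + lowerBound);
    }
}
```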
2024-12-10T11:01:17,219 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T11:01:17,221 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71926887, jitterRate=0.07179413735866547}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:01:17,221 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-10T11:01:17,223 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733828477189Writing region info on filesystem at 1733828477190 (+1 ms)Initializing all the Stores at 1733828477192 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828477192Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828477192Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828477192Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828477192Cleaning up temporary data from old regions at 1733828477215 (+23 ms)Running coprocessor post-open hooks at 1733828477221 (+6 ms)Region opened successfully at 1733828477223 (+2 ms) 2024-12-10T11:01:17,232 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733828477069 2024-12-10T11:01:17,244 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T11:01:17,244 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-10T11:01:17,247 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=944a6b9062fa,35357,1733828475121 2024-12-10T11:01:17,249 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 944a6b9062fa,35357,1733828475121, state=OPEN 2024-12-10T11:01:17,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:01:17,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:01:17,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:01:17,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:01:17,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:17,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:17,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:17,253 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:17,253 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=944a6b9062fa,35357,1733828475121 2024-12-10T11:01:17,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T11:01:17,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=944a6b9062fa,35357,1733828475121 in 354 msec 2024-12-10T11:01:17,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T11:01:17,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 834 msec 2024-12-10T11:01:17,267 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:01:17,267 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-10T11:01:17,288 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T11:01:17,290 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=944a6b9062fa,35357,1733828475121, seqNum=-1] 2024-12-10T11:01:17,312 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T11:01:17,314 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54331, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T11:01:17,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1610 sec 2024-12-10T11:01:17,341 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733828477341, completionTime=-1 2024-12-10T11:01:17,343 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-10T11:01:17,343 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-10T11:01:17,369 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-10T11:01:17,369 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733828537369 2024-12-10T11:01:17,369 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733828597369 2024-12-10T11:01:17,369 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-12-10T11:01:17,371 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-10T11:01:17,379 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39553,1733828474155-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:17,379 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39553,1733828474155-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:17,380 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39553,1733828474155-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:17,381 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-944a6b9062fa:39553, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:17,382 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:17,382 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
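The PEWorker (and, later in the log, the test client) fetches the hbase:meta location from the connection registry before issuing ClientService calls. A minimal client-side sketch of the same lookup, assuming the Configuration already points at this mini-cluster's ZooKeeper quorum:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Equivalent in spirit to "The fetched meta region location is
            // [region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1]".
            HRegionLocation metaLocation = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
            System.out.println("hbase:meta is served by " + metaLocation.getServerName());
        }
    }
}
```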
2024-12-10T11:01:17,389 DEBUG [master/944a6b9062fa:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-10T11:01:17,412 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.191sec 2024-12-10T11:01:17,414 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T11:01:17,415 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T11:01:17,416 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T11:01:17,417 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-10T11:01:17,417 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T11:01:17,418 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39553,1733828474155-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:01:17,418 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39553,1733828474155-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T11:01:17,423 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-10T11:01:17,424 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T11:01:17,424 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39553,1733828474155-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
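Once the master reports that initialization is complete, a client (or a test such as this one) can confirm the active master and the number of live region servers through the Admin API. A short sketch under the same assumption that the Configuration resolves to this cluster:

```java
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            ClusterMetrics metrics = admin.getClusterMetrics();
            // The log below reports the active master and three serving region servers.
            System.out.println("active master: " + metrics.getMasterName());
            System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
        }
    }
}
```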
2024-12-10T11:01:17,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@418871fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:01:17,490 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-10T11:01:17,490 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-10T11:01:17,493 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 944a6b9062fa,39553,-1 for getting cluster id 2024-12-10T11:01:17,496 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-10T11:01:17,506 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0bc51a23-a87d-44cc-83bc-2b18235d30a0' 2024-12-10T11:01:17,509 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-10T11:01:17,509 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0bc51a23-a87d-44cc-83bc-2b18235d30a0" 2024-12-10T11:01:17,510 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f0a8071, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:01:17,510 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [944a6b9062fa,39553,-1] 2024-12-10T11:01:17,513 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-10T11:01:17,515 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:17,516 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-10T11:01:17,519 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f46cc90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:01:17,520 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T11:01:17,527 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=944a6b9062fa,35357,1733828475121, seqNum=-1] 2024-12-10T11:01:17,528 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T11:01:17,530 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39560, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T11:01:17,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=944a6b9062fa,39553,1733828474155 2024-12-10T11:01:17,563 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-10T11:01:17,569 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 944a6b9062fa,39553,1733828474155 2024-12-10T11:01:17,571 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@79ae593d 2024-12-10T11:01:17,572 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T11:01:17,574 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56114, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T11:01:17,580 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T11:01:17,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-10T11:01:17,592 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T11:01:17,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-10T11:01:17,595 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:17,597 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T11:01:17,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:01:17,607 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:17,607 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
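The create request logged above ('TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family) corresponds to a standard Admin.createTable call. A hedged sketch of how a client would issue the same request; the connection setup is assumed, and only the table and family names come from the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableDescriptor desc = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                    .setRegionReplication(1)                                  // REGION_REPLICATION => '1'
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // the single 'cf' family
                    .build();
            // Triggers the CreateTableProcedure (pid=4 in this log) on the master.
            admin.createTable(desc);
        }
    }
}
```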
2024-12-10T11:01:17,612 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:41592 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:35865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41592 dst: /127.0.0.1:35865 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:17,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-10T11:01:17,621 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:17,626 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 205ed45caa732e1de41041a7767b6145, NAME => 'TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a 2024-12-10T11:01:17,640 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:17,640 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
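The repeated "Cannot allocate parity block ... policy=RS-3-2-1024k" warnings come from HDFS erasure coding: the RS-3-2 policy wants five distinct datanodes (3 data + 2 parity blocks per group), while this mini-cluster runs only three, so the parity blocks at indexes 3 and 4 cannot be placed. Besides the `hdfs ec -verifyClusterSetup` command that the warning itself suggests, the effective policy on a directory can be inspected programmatically; the following is a sketch that assumes the test data root and the hdfs://localhost:40209 NameNode address seen in the logged paths:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class ErasureCodingCheckSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the WAL paths in this log.
        DistributedFileSystem dfs = (DistributedFileSystem)
                DistributedFileSystem.get(URI.create("hdfs://localhost:40209"), conf);

        // Test data root as it appears in the logged paths.
        Path dataRoot = new Path("/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a");
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dataRoot);

        // For RS-3-2-1024k the warnings above show a 3-node cluster falling short of the
        // 5 datanodes needed to place every data and parity block on a distinct node.
        System.out.println("effective EC policy: " + (policy == null ? "replication" : policy.getName()));
    }
}
```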
2024-12-10T11:01:17,647 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:51400 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:33677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51400 dst: /127.0.0.1:33677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:17,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-10T11:01:17,655 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:17,656 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:17,656 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 205ed45caa732e1de41041a7767b6145, disabling compactions & flushes 2024-12-10T11:01:17,656 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 2024-12-10T11:01:17,656 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 2024-12-10T11:01:17,656 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. after waiting 0 ms 2024-12-10T11:01:17,656 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 2024-12-10T11:01:17,657 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 
2024-12-10T11:01:17,657 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 205ed45caa732e1de41041a7767b6145: Waiting for close lock at 1733828477656Disabling compacts and flushes for region at 1733828477656Disabling writes for close at 1733828477656Writing region close event to WAL at 1733828477656Closed at 1733828477656 2024-12-10T11:01:17,659 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T11:01:17,664 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733828477659"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733828477659"}]},"ts":"1733828477659"} 2024-12-10T11:01:17,672 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-10T11:01:17,674 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T11:01:17,677 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733828477674"}]},"ts":"1733828477674"} 2024-12-10T11:01:17,682 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-10T11:01:17,683 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {944a6b9062fa=0} racks are {/default-rack=0} 2024-12-10T11:01:17,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T11:01:17,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T11:01:17,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T11:01:17,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T11:01:17,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T11:01:17,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T11:01:17,685 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T11:01:17,685 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T11:01:17,685 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T11:01:17,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T11:01:17,687 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=205ed45caa732e1de41041a7767b6145, ASSIGN}] 2024-12-10T11:01:17,690 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=205ed45caa732e1de41041a7767b6145, ASSIGN 2024-12-10T11:01:17,692 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=205ed45caa732e1de41041a7767b6145, ASSIGN; state=OFFLINE, location=944a6b9062fa,35357,1733828475121; forceNewPlan=false, retain=false 2024-12-10T11:01:17,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:01:17,846 INFO [944a6b9062fa:39553 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-10T11:01:17,847 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=205ed45caa732e1de41041a7767b6145, regionState=OPENING, regionLocation=944a6b9062fa,35357,1733828475121 2024-12-10T11:01:17,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=205ed45caa732e1de41041a7767b6145, ASSIGN because future has completed 2024-12-10T11:01:17,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 205ed45caa732e1de41041a7767b6145, server=944a6b9062fa,35357,1733828475121}] 2024-12-10T11:01:17,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:01:18,013 INFO [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 
2024-12-10T11:01:18,013 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 205ed45caa732e1de41041a7767b6145, NAME => 'TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145.', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:01:18,014 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,014 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:18,014 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,014 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,017 INFO [StoreOpener-205ed45caa732e1de41041a7767b6145-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,019 INFO [StoreOpener-205ed45caa732e1de41041a7767b6145-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 205ed45caa732e1de41041a7767b6145 columnFamilyName cf 2024-12-10T11:01:18,019 DEBUG [StoreOpener-205ed45caa732e1de41041a7767b6145-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:18,020 INFO [StoreOpener-205ed45caa732e1de41041a7767b6145-1 {}] regionserver.HStore(327): Store=205ed45caa732e1de41041a7767b6145/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:01:18,020 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,021 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/default/TestHBaseWalOnEC/205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,022 DEBUG 
[RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/default/TestHBaseWalOnEC/205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,023 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,023 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,026 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,033 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/default/TestHBaseWalOnEC/205ed45caa732e1de41041a7767b6145/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:01:18,034 INFO [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 205ed45caa732e1de41041a7767b6145; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71757836, jitterRate=0.06927508115768433}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T11:01:18,034 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,035 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 205ed45caa732e1de41041a7767b6145: Running coprocessor pre-open hook at 1733828478014Writing region info on filesystem at 1733828478014Initializing all the Stores at 1733828478016 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828478016Cleaning up temporary data from old regions at 1733828478023 (+7 ms)Running coprocessor post-open hooks at 1733828478034 (+11 ms)Region opened successfully at 1733828478035 (+1 ms) 2024-12-10T11:01:18,037 INFO [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145., pid=6, masterSystemTime=1733828478006 2024-12-10T11:01:18,041 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 2024-12-10T11:01:18,041 INFO [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 
2024-12-10T11:01:18,042 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=205ed45caa732e1de41041a7767b6145, regionState=OPEN, openSeqNum=2, regionLocation=944a6b9062fa,35357,1733828475121 2024-12-10T11:01:18,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 205ed45caa732e1de41041a7767b6145, server=944a6b9062fa,35357,1733828475121 because future has completed 2024-12-10T11:01:18,052 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T11:01:18,052 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 205ed45caa732e1de41041a7767b6145, server=944a6b9062fa,35357,1733828475121 in 195 msec 2024-12-10T11:01:18,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T11:01:18,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=205ed45caa732e1de41041a7767b6145, ASSIGN in 365 msec 2024-12-10T11:01:18,059 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T11:01:18,060 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733828478059"}]},"ts":"1733828478059"} 2024-12-10T11:01:18,063 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-10T11:01:18,064 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T11:01:18,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 481 msec 2024-12-10T11:01:18,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:01:18,235 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T11:01:18,235 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-10T11:01:18,236 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T11:01:18,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-10T11:01:18,243 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T11:01:18,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
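[Editor's note] At this point CreateTableProcedure pid=4 has finished and the single region of TestHBaseWalOnEC is assigned. Purely as a hedged illustration of the client side that drives such a procedure (the actual TestHBaseWalOnEC source is not shown in this log), a table with one 'cf' family matching the descriptor logged above could be created with the standard Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("TestHBaseWalOnEC");
          // One column family 'cf' with default attributes, as in the
          // tableDescriptor logged by RegionOpenAndInit above.
          admin.createTable(TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
        }
      }
    }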
2024-12-10T11:01:18,252 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145., hostname=944a6b9062fa,35357,1733828475121, seqNum=2] 2024-12-10T11:01:18,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-10T11:01:18,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-10T11:01:18,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:01:18,270 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-10T11:01:18,272 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T11:01:18,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T11:01:18,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:01:18,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35357 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-10T11:01:18,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 2024-12-10T11:01:18,441 INFO [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 205ed45caa732e1de41041a7767b6145 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-10T11:01:18,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/default/TestHBaseWalOnEC/205ed45caa732e1de41041a7767b6145/.tmp/cf/c2c35eaa21a543be805ee7199c2fcb6b is 36, key is row/cf:cq/1733828478255/Put/seqid=0 2024-12-10T11:01:18,506 WARN [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-10T11:01:18,507 WARN [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:18,519 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_532971149_22 at /127.0.0.1:45216 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:46195:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45216 dst: /127.0.0.1:46195 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:18,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-10T11:01:18,526 WARN [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T11:01:18,526 INFO [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/default/TestHBaseWalOnEC/205ed45caa732e1de41041a7767b6145/.tmp/cf/c2c35eaa21a543be805ee7199c2fcb6b 2024-12-10T11:01:18,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/default/TestHBaseWalOnEC/205ed45caa732e1de41041a7767b6145/.tmp/cf/c2c35eaa21a543be805ee7199c2fcb6b as hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/default/TestHBaseWalOnEC/205ed45caa732e1de41041a7767b6145/cf/c2c35eaa21a543be805ee7199c2fcb6b 2024-12-10T11:01:18,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:01:18,590 INFO [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/default/TestHBaseWalOnEC/205ed45caa732e1de41041a7767b6145/cf/c2c35eaa21a543be805ee7199c2fcb6b, entries=1, sequenceid=5, filesize=4.7 K 2024-12-10T11:01:18,596 INFO [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 205ed45caa732e1de41041a7767b6145 in 155ms, sequenceid=5, compaction requested=false 2024-12-10T11:01:18,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-10T11:01:18,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 205ed45caa732e1de41041a7767b6145: 2024-12-10T11:01:18,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 
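[Editor's note] The flush above persisted a single 32-byte cell (key row/cf:cq, per the HFileWriterImpl line) into store file c2c35eaa21a543be805ee7199c2fcb6b. As a rough client-side sketch only (the test's own code is not shown in this log, and the cell value below is an assumption), the write-then-flush round trip recorded here corresponds to something like:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn =
                ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // Single cell row/cf:cq, as in the flushed HFile's biggest-cell key above.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Triggers the FlushTableProcedure / FlushRegionProcedure chain logged above.
          admin.flush(name);
        }
      }
    }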
2024-12-10T11:01:18,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-10T11:01:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-10T11:01:18,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-10T11:01:18,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 332 msec 2024-12-10T11:01:18,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 352 msec 2024-12-10T11:01:18,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-10T11:01:18,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-10T11:01:18,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-10T11:01:18,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-10T11:01:18,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-10T11:01:18,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-10T11:01:18,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-10T11:01:18,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-10T11:01:18,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-10T11:01:18,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-10T11:01:18,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39553 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:01:18,894 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T11:01:18,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-10T11:01:18,911 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-10T11:01:18,911 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:01:18,917 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
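[Editor's note] The call stack above shows the shutdown entry point: TestHBaseWalOnEC.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection and then stops the HBase and DFS mini clusters. A minimal sketch of such a teardown hook follows; it assumes a JUnit 4 test with a static HBaseTestingUtil field named UTIL (the field name and the exact annotation used by the real test are assumptions; only the tearDown and shutdownMiniCluster frames appear in this log):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;

    public class TearDownSketch {
      // In the real test this utility would also start the mini cluster in a setup hook.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @AfterClass
      public static void tearDown() throws Exception {
        // Closes the cluster connection and stops the HBase and HDFS mini clusters,
        // producing the "Shutting down minicluster" sequence seen in this log.
        UTIL.shutdownMiniCluster();
      }
    }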
2024-12-10T11:01:18,918 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:18,918 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-10T11:01:18,918 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T11:01:18,918 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=875678016, stopped=false 2024-12-10T11:01:18,918 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=944a6b9062fa,39553,1733828474155 2024-12-10T11:01:18,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:18,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:18,921 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T11:01:18,922 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:18,922 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:18,922 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:18,922 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39553-0x10176cde6a20000, 
quorum=127.0.0.1:52356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:18,922 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-10T11:01:18,922 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:01:18,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:18,923 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '944a6b9062fa,33263,1733828474924' ***** 2024-12-10T11:01:18,923 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:01:18,924 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '944a6b9062fa,45813,1733828475055' ***** 2024-12-10T11:01:18,924 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:01:18,924 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '944a6b9062fa,35357,1733828475121' ***** 2024-12-10T11:01:18,924 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:01:18,924 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:01:18,924 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:01:18,924 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:01:18,924 INFO [RS:0;944a6b9062fa:33263 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:01:18,924 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:01:18,924 INFO [RS:1;944a6b9062fa:45813 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:01:18,924 INFO [RS:2;944a6b9062fa:35357 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:01:18,924 INFO [RS:1;944a6b9062fa:45813 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:01:18,924 INFO [RS:0;944a6b9062fa:33263 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:01:18,924 INFO [RS:2;944a6b9062fa:35357 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:01:18,924 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(959): stopping server 944a6b9062fa,33263,1733828474924 2024-12-10T11:01:18,924 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(959): stopping server 944a6b9062fa,45813,1733828475055 2024-12-10T11:01:18,924 INFO [RS:0;944a6b9062fa:33263 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:01:18,924 INFO [RS:1;944a6b9062fa:45813 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:01:18,925 INFO [RS:0;944a6b9062fa:33263 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;944a6b9062fa:33263. 
2024-12-10T11:01:18,925 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(3091): Received CLOSE for 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,925 INFO [RS:1;944a6b9062fa:45813 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;944a6b9062fa:45813. 2024-12-10T11:01:18,925 DEBUG [RS:0;944a6b9062fa:33263 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:01:18,925 DEBUG [RS:0;944a6b9062fa:33263 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:18,925 DEBUG [RS:1;944a6b9062fa:45813 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:01:18,925 DEBUG [RS:1;944a6b9062fa:45813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:18,925 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(976): stopping server 944a6b9062fa,33263,1733828474924; all regions closed. 
2024-12-10T11:01:18,925 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(976): stopping server 944a6b9062fa,45813,1733828475055; all regions closed. 2024-12-10T11:01:18,925 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:01:18,925 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(959): stopping server 944a6b9062fa,35357,1733828475121 2024-12-10T11:01:18,925 INFO [RS:2;944a6b9062fa:35357 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:01:18,925 INFO [RS:2;944a6b9062fa:35357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;944a6b9062fa:35357. 2024-12-10T11:01:18,926 DEBUG [RS:2;944a6b9062fa:35357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:01:18,926 DEBUG [RS:2;944a6b9062fa:35357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:18,926 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T11:01:18,926 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 205ed45caa732e1de41041a7767b6145, disabling compactions & flushes 2024-12-10T11:01:18,926 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T11:01:18,926 INFO [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 2024-12-10T11:01:18,926 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T11:01:18,926 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 2024-12-10T11:01:18,926 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 
after waiting 0 ms 2024-12-10T11:01:18,926 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-10T11:01:18,926 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 2024-12-10T11:01:18,927 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-10T11:01:18,927 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1325): Online Regions={205ed45caa732e1de41041a7767b6145=TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145., 1588230740=hbase:meta,,1.1588230740} 2024-12-10T11:01:18,927 INFO [regionserver/944a6b9062fa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:01:18,927 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T11:01:18,927 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 205ed45caa732e1de41041a7767b6145 2024-12-10T11:01:18,927 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T11:01:18,927 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T11:01:18,927 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T11:01:18,928 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T11:01:18,928 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-10T11:01:18,931 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:01:18,932 INFO [regionserver/944a6b9062fa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:01:18,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_1073741827_1017 (size=93) 2024-12-10T11:01:18,936 INFO [regionserver/944a6b9062fa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:01:18,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_1073741826_1016 (size=93) 2024-12-10T11:01:18,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_1073741826_1016 (size=93) 2024-12-10T11:01:18,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_1073741827_1017 (size=93) 2024-12-10T11:01:18,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_1073741826_1016 (size=93) 2024-12-10T11:01:18,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to 
blk_1073741827_1017 (size=93) 2024-12-10T11:01:18,943 DEBUG [RS:1;944a6b9062fa:45813 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/oldWALs 2024-12-10T11:01:18,943 INFO [RS:1;944a6b9062fa:45813 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 944a6b9062fa%2C45813%2C1733828475055:(num 1733828476703) 2024-12-10T11:01:18,943 DEBUG [RS:1;944a6b9062fa:45813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:18,943 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:01:18,944 INFO [RS:1;944a6b9062fa:45813 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:01:18,944 INFO [RS:1;944a6b9062fa:45813 {}] hbase.ChoreService(370): Chore service for: regionserver/944a6b9062fa:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T11:01:18,944 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T11:01:18,944 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T11:01:18,944 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T11:01:18,944 DEBUG [RS:0;944a6b9062fa:33263 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/oldWALs 2024-12-10T11:01:18,944 INFO [RS:1;944a6b9062fa:45813 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:01:18,944 INFO [RS:0;944a6b9062fa:33263 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 944a6b9062fa%2C33263%2C1733828474924:(num 1733828476703) 2024-12-10T11:01:18,944 DEBUG [RS:0;944a6b9062fa:33263 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:18,944 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:01:18,944 INFO [RS:0;944a6b9062fa:33263 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:01:18,944 INFO [RS:1;944a6b9062fa:45813 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45813 2024-12-10T11:01:18,945 INFO [RS:0;944a6b9062fa:33263 {}] hbase.ChoreService(370): Chore service for: regionserver/944a6b9062fa:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T11:01:18,945 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T11:01:18,945 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T11:01:18,945 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T11:01:18,945 INFO [regionserver/944a6b9062fa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-10T11:01:18,945 INFO [RS:0;944a6b9062fa:33263 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:01:18,945 INFO [RS:0;944a6b9062fa:33263 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33263 2024-12-10T11:01:18,947 INFO [regionserver/944a6b9062fa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T11:01:18,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:01:18,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/944a6b9062fa,45813,1733828475055 2024-12-10T11:01:18,949 INFO [RS:1;944a6b9062fa:45813 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:01:18,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/944a6b9062fa,33263,1733828474924 2024-12-10T11:01:18,949 INFO [RS:0;944a6b9062fa:33263 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:01:18,952 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [944a6b9062fa,33263,1733828474924] 2024-12-10T11:01:18,954 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/944a6b9062fa,33263,1733828474924 already deleted, retry=false 2024-12-10T11:01:18,954 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 944a6b9062fa,33263,1733828474924 expired; onlineServers=2 2024-12-10T11:01:18,954 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [944a6b9062fa,45813,1733828475055] 2024-12-10T11:01:18,955 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/944a6b9062fa,45813,1733828475055 already deleted, retry=false 2024-12-10T11:01:18,955 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 944a6b9062fa,45813,1733828475055 expired; onlineServers=1 2024-12-10T11:01:18,958 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/default/TestHBaseWalOnEC/205ed45caa732e1de41041a7767b6145/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T11:01:18,961 INFO [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 
2024-12-10T11:01:18,961 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 205ed45caa732e1de41041a7767b6145: Waiting for close lock at 1733828478926Running coprocessor pre-close hooks at 1733828478926Disabling compacts and flushes for region at 1733828478926Disabling writes for close at 1733828478926Writing region close event to WAL at 1733828478930 (+4 ms)Running coprocessor post-close hooks at 1733828478959 (+29 ms)Closed at 1733828478961 (+2 ms) 2024-12-10T11:01:18,962 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145. 2024-12-10T11:01:18,973 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/.tmp/info/c84410623f714497b3c50170b791414b is 153, key is TestHBaseWalOnEC,,1733828477575.205ed45caa732e1de41041a7767b6145./info:regioninfo/1733828478042/Put/seqid=0 2024-12-10T11:01:18,976 WARN [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:18,976 WARN [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:18,983 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_532971149_22 at /127.0.0.1:45280 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:46195:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45280 dst: /127.0.0.1:46195 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:18,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-10T11:01:18,999 WARN [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:19,000 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/.tmp/info/c84410623f714497b3c50170b791414b 2024-12-10T11:01:19,033 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/.tmp/ns/e30f3c8d464646458f007094b341593f is 43, key is default/ns:d/1733828477319/Put/seqid=0 2024-12-10T11:01:19,037 WARN [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,037 WARN [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,044 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_532971149_22 at /127.0.0.1:45310 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:46195:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45310 dst: /127.0.0.1:46195 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:19,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-10T11:01:19,050 WARN [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:19,050 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/.tmp/ns/e30f3c8d464646458f007094b341593f 2024-12-10T11:01:19,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:01:19,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:01:19,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33263-0x10176cde6a20001, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:01:19,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45813-0x10176cde6a20002, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:01:19,052 INFO [RS:0;944a6b9062fa:33263 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:01:19,052 INFO [RS:1;944a6b9062fa:45813 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:01:19,053 INFO [RS:0;944a6b9062fa:33263 {}] regionserver.HRegionServer(1031): Exiting; stopping=944a6b9062fa,33263,1733828474924; zookeeper connection closed. 2024-12-10T11:01:19,053 INFO [RS:1;944a6b9062fa:45813 {}] regionserver.HRegionServer(1031): Exiting; stopping=944a6b9062fa,45813,1733828475055; zookeeper connection closed. 2024-12-10T11:01:19,053 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6b99af3e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6b99af3e 2024-12-10T11:01:19,053 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@956c330 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@956c330 2024-12-10T11:01:19,079 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/.tmp/table/0e3033aa7d5640fcb64fab7c31ed5822 is 52, key is TestHBaseWalOnEC/table:state/1733828478059/Put/seqid=0 2024-12-10T11:01:19,081 WARN [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,081 WARN [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,084 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_532971149_22 at /127.0.0.1:51480 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:33677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51480 dst: /127.0.0.1:33677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:19,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-10T11:01:19,089 WARN [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
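Editor's note on the recurring erasure-coding warnings above: RS-3-2-1024k is a Reed-Solomon policy with 3 data units and 2 parity units, so a full striped block group needs 5 DataNodes, while this mini cluster runs only 3 (see the StartMiniClusterOption record near the end of this log). The two parity blocks therefore cannot be placed, which is what the "Cannot allocate parity block(index=3/4)" and "Block group <1> failed to write 2 blocks" messages report; the log itself points at 'hdfs ec -verifyClusterSetup' as the way to confirm the topology. The sketch below is illustrative only, not code from TestHBaseWalOnEC: it assumes a hypothetical class name (EcTopologyCheck), that fs.defaultFS points at the target HDFS cluster, and it uses standard hadoop-hdfs-client APIs to compare the policy's data+parity requirement against the live DataNode count.

```java
// Illustrative sketch only (not from TestHBaseWalOnEC): check whether the cluster
// has enough live DataNodes for the erasure coding policy applied to a path.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class EcTopologyCheck {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS in the configuration points at the HDFS cluster under test.
    Configuration conf = new Configuration();
    Path target = new Path(args.length > 0 ? args[0] : "/");
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) target.getFileSystem(conf)) {
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(target);
      if (policy == null) {
        System.out.println(target + " is replicated, not erasure coded");
        return;
      }
      // For RS-3-2-1024k this is 3 + 2 = 5 DataNodes.
      int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
      int live = dfs.getDataNodeStats(DatanodeReportType.LIVE).length;
      System.out.printf("policy=%s needs %d DataNodes, live=%d%n",
          policy.getName(), needed, live);
      if (live < needed) {
        System.out.println("Not enough DataNodes: some parity blocks cannot be placed");
      }
    }
  }
}
```

On the 3-DataNode mini cluster used here this check would report needed=5 versus live=3 for RS-3-2-1024k, which matches the parity-allocation warnings in the surrounding log.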
2024-12-10T11:01:19,089 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/.tmp/table/0e3033aa7d5640fcb64fab7c31ed5822 2024-12-10T11:01:19,101 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/.tmp/info/c84410623f714497b3c50170b791414b as hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/info/c84410623f714497b3c50170b791414b 2024-12-10T11:01:19,111 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/info/c84410623f714497b3c50170b791414b, entries=10, sequenceid=11, filesize=6.5 K 2024-12-10T11:01:19,113 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/.tmp/ns/e30f3c8d464646458f007094b341593f as hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/ns/e30f3c8d464646458f007094b341593f 2024-12-10T11:01:19,122 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/ns/e30f3c8d464646458f007094b341593f, entries=2, sequenceid=11, filesize=5.0 K 2024-12-10T11:01:19,124 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/.tmp/table/0e3033aa7d5640fcb64fab7c31ed5822 as hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/table/0e3033aa7d5640fcb64fab7c31ed5822 2024-12-10T11:01:19,128 DEBUG [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-10T11:01:19,135 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/table/0e3033aa7d5640fcb64fab7c31ed5822, entries=2, sequenceid=11, filesize=5.1 K 2024-12-10T11:01:19,137 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 209ms, sequenceid=11, compaction requested=false 2024-12-10T11:01:19,137 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-10T11:01:19,163 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-10T11:01:19,164 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T11:01:19,164 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T11:01:19,164 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733828478927Running coprocessor pre-close hooks at 1733828478927Disabling compacts and flushes for region at 1733828478927Disabling writes for close at 1733828478927Obtaining lock to block concurrent updates at 1733828478928 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733828478928Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733828478929 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733828478930 (+1 ms)Flushing 1588230740/info: creating writer at 1733828478930Flushing 1588230740/info: appending metadata at 1733828478968 (+38 ms)Flushing 1588230740/info: closing flushed file at 1733828478968Flushing 1588230740/ns: creating writer at 1733828479012 (+44 ms)Flushing 1588230740/ns: appending metadata at 1733828479032 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1733828479032Flushing 1588230740/table: creating writer at 1733828479059 (+27 ms)Flushing 1588230740/table: appending metadata at 1733828479079 (+20 ms)Flushing 1588230740/table: closing flushed file at 1733828479079Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16f9431e: reopening flushed file at 1733828479100 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ef3cd35: reopening flushed file at 1733828479111 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f24d1cd: reopening flushed file at 1733828479123 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 209ms, sequenceid=11, compaction requested=false at 1733828479137 (+14 ms)Writing region close event to WAL at 1733828479144 (+7 ms)Running coprocessor post-close hooks at 1733828479164 (+20 ms)Closed at 1733828479164 2024-12-10T11:01:19,165 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T11:01:19,328 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(976): stopping server 944a6b9062fa,35357,1733828475121; all regions closed. 
2024-12-10T11:01:19,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_1073741829_1019 (size=2751) 2024-12-10T11:01:19,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_1073741829_1019 (size=2751) 2024-12-10T11:01:19,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_1073741829_1019 (size=2751) 2024-12-10T11:01:19,337 DEBUG [RS:2;944a6b9062fa:35357 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/oldWALs 2024-12-10T11:01:19,337 INFO [RS:2;944a6b9062fa:35357 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 944a6b9062fa%2C35357%2C1733828475121.meta:.meta(num 1733828477109) 2024-12-10T11:01:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_1073741828_1018 (size=1298) 2024-12-10T11:01:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_1073741828_1018 (size=1298) 2024-12-10T11:01:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_1073741828_1018 (size=1298) 2024-12-10T11:01:19,343 DEBUG [RS:2;944a6b9062fa:35357 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/oldWALs 2024-12-10T11:01:19,343 INFO [RS:2;944a6b9062fa:35357 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 944a6b9062fa%2C35357%2C1733828475121:(num 1733828476702) 2024-12-10T11:01:19,343 DEBUG [RS:2;944a6b9062fa:35357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:19,343 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:01:19,343 INFO [RS:2;944a6b9062fa:35357 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:01:19,344 INFO [RS:2;944a6b9062fa:35357 {}] hbase.ChoreService(370): Chore service for: regionserver/944a6b9062fa:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T11:01:19,344 INFO [RS:2;944a6b9062fa:35357 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:01:19,344 INFO [regionserver/944a6b9062fa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-10T11:01:19,344 INFO [RS:2;944a6b9062fa:35357 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35357 2024-12-10T11:01:19,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/944a6b9062fa,35357,1733828475121 2024-12-10T11:01:19,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:01:19,347 INFO [RS:2;944a6b9062fa:35357 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:01:19,348 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [944a6b9062fa,35357,1733828475121] 2024-12-10T11:01:19,350 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/944a6b9062fa,35357,1733828475121 already deleted, retry=false 2024-12-10T11:01:19,350 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 944a6b9062fa,35357,1733828475121 expired; onlineServers=0 2024-12-10T11:01:19,350 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '944a6b9062fa,39553,1733828474155' ***** 2024-12-10T11:01:19,350 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T11:01:19,350 INFO [M:0;944a6b9062fa:39553 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:01:19,350 INFO [M:0;944a6b9062fa:39553 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:01:19,350 DEBUG [M:0;944a6b9062fa:39553 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T11:01:19,351 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-10T11:01:19,351 DEBUG [M:0;944a6b9062fa:39553 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T11:01:19,351 DEBUG [master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.large.0-1733828476323 {}] cleaner.HFileCleaner(306): Exit Thread[master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.large.0-1733828476323,5,FailOnTimeoutGroup] 2024-12-10T11:01:19,351 DEBUG [master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.small.0-1733828476325 {}] cleaner.HFileCleaner(306): Exit Thread[master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.small.0-1733828476325,5,FailOnTimeoutGroup] 2024-12-10T11:01:19,351 INFO [M:0;944a6b9062fa:39553 {}] hbase.ChoreService(370): Chore service for: master/944a6b9062fa:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-10T11:01:19,351 INFO [M:0;944a6b9062fa:39553 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:01:19,351 DEBUG [M:0;944a6b9062fa:39553 {}] master.HMaster(1795): Stopping service threads 2024-12-10T11:01:19,351 INFO [M:0;944a6b9062fa:39553 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T11:01:19,351 INFO [M:0;944a6b9062fa:39553 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T11:01:19,352 INFO [M:0;944a6b9062fa:39553 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T11:01:19,352 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-10T11:01:19,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T11:01:19,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:19,353 DEBUG [M:0;944a6b9062fa:39553 {}] zookeeper.ZKUtil(347): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T11:01:19,353 WARN [M:0;944a6b9062fa:39553 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T11:01:19,354 INFO [M:0;944a6b9062fa:39553 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/.lastflushedseqids 2024-12-10T11:01:19,374 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,374 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-10T11:01:19,376 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:51498 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:33677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51498 dst: /127.0.0.1:33677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:19,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-10T11:01:19,381 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:19,381 INFO [M:0;944a6b9062fa:39553 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-10T11:01:19,381 INFO [M:0;944a6b9062fa:39553 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T11:01:19,381 DEBUG [M:0;944a6b9062fa:39553 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T11:01:19,381 INFO [M:0;944a6b9062fa:39553 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:19,381 DEBUG [M:0;944a6b9062fa:39553 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:19,381 DEBUG [M:0;944a6b9062fa:39553 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T11:01:19,382 DEBUG [M:0;944a6b9062fa:39553 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T11:01:19,382 INFO [M:0;944a6b9062fa:39553 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-10T11:01:19,403 DEBUG [M:0;944a6b9062fa:39553 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc3f50362c504845b7fd9d842281d145 is 82, key is hbase:meta,,1/info:regioninfo/1733828477247/Put/seqid=0 2024-12-10T11:01:19,405 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,405 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:51522 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:33677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51522 dst: /127.0.0.1:33677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:19,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-10T11:01:19,413 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T11:01:19,413 INFO [M:0;944a6b9062fa:39553 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc3f50362c504845b7fd9d842281d145 2024-12-10T11:01:19,447 DEBUG [M:0;944a6b9062fa:39553 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4a1ffcee728e4b909742f54a4cfbd367 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733828478067/Put/seqid=0 2024-12-10T11:01:19,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:01:19,449 INFO [RS:2;944a6b9062fa:35357 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:01:19,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35357-0x10176cde6a20003, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:01:19,449 INFO [RS:2;944a6b9062fa:35357 {}] regionserver.HRegionServer(1031): Exiting; stopping=944a6b9062fa,35357,1733828475121; zookeeper connection closed. 2024-12-10T11:01:19,449 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4aff1ba3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4aff1ba3 2024-12-10T11:01:19,449 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,450 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,450 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-10T11:01:19,458 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:51534 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:33677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51534 dst: /127.0.0.1:33677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:19,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775552_1037 (size=6440) 2024-12-10T11:01:19,471 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:19,472 INFO [M:0;944a6b9062fa:39553 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4a1ffcee728e4b909742f54a4cfbd367 2024-12-10T11:01:19,504 DEBUG [M:0;944a6b9062fa:39553 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e31ad043fa604fc9afce40d438b8761b is 69, key is 944a6b9062fa,33263,1733828474924/rs:state/1733828476445/Put/seqid=0 2024-12-10T11:01:19,506 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,506 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:01:19,514 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1012754211_22 at /127.0.0.1:51550 [Receiving block BP-475784458-172.17.0.2-1733828470508:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51550 dst: /127.0.0.1:33677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:01:19,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-10T11:01:19,525 WARN [M:0;944a6b9062fa:39553 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:01:19,525 INFO [M:0;944a6b9062fa:39553 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e31ad043fa604fc9afce40d438b8761b 2024-12-10T11:01:19,534 DEBUG [M:0;944a6b9062fa:39553 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc3f50362c504845b7fd9d842281d145 as hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc3f50362c504845b7fd9d842281d145 2024-12-10T11:01:19,542 INFO [M:0;944a6b9062fa:39553 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc3f50362c504845b7fd9d842281d145, entries=8, sequenceid=72, filesize=5.5 K 2024-12-10T11:01:19,543 DEBUG [M:0;944a6b9062fa:39553 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4a1ffcee728e4b909742f54a4cfbd367 as hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4a1ffcee728e4b909742f54a4cfbd367 2024-12-10T11:01:19,551 INFO [M:0;944a6b9062fa:39553 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4a1ffcee728e4b909742f54a4cfbd367, entries=8, sequenceid=72, filesize=6.3 K 2024-12-10T11:01:19,558 DEBUG [M:0;944a6b9062fa:39553 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e31ad043fa604fc9afce40d438b8761b as hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e31ad043fa604fc9afce40d438b8761b 2024-12-10T11:01:19,568 INFO [M:0;944a6b9062fa:39553 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e31ad043fa604fc9afce40d438b8761b, entries=3, sequenceid=72, filesize=5.2 K 2024-12-10T11:01:19,569 INFO [M:0;944a6b9062fa:39553 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=72, compaction requested=false 2024-12-10T11:01:19,578 INFO [M:0;944a6b9062fa:39553 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:19,578 DEBUG [M:0;944a6b9062fa:39553 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733828479381Disabling compacts and flushes for region at 1733828479381Disabling writes for close at 1733828479381Obtaining lock to block concurrent updates at 1733828479382 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733828479382Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733828479382Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733828479383 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733828479383Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733828479401 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733828479401Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733828479422 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733828479446 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733828479446Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733828479481 (+35 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733828479503 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733828479503Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c9278ab: reopening flushed file at 1733828479533 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17c3ddea: reopening flushed file at 1733828479542 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@704b7a8f: reopening flushed file at 1733828479552 (+10 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=72, compaction requested=false at 1733828479569 (+17 ms)Writing region close event to WAL at 1733828479578 (+9 ms)Closed at 1733828479578 2024-12-10T11:01:19,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33677 is added to blk_1073741825_1011 (size=32683) 2024-12-10T11:01:19,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46195 is added to blk_1073741825_1011 (size=32683) 2024-12-10T11:01:19,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35865 is added to blk_1073741825_1011 (size=32683) 2024-12-10T11:01:19,590 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-10T11:01:19,590 INFO [M:0;944a6b9062fa:39553 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-10T11:01:19,591 INFO [M:0;944a6b9062fa:39553 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39553 2024-12-10T11:01:19,591 INFO [M:0;944a6b9062fa:39553 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:01:19,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:01:19,693 INFO [M:0;944a6b9062fa:39553 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:01:19,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39553-0x10176cde6a20000, quorum=127.0.0.1:52356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:01:19,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:01:19,706 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T11:01:19,707 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T11:01:19,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T11:01:19,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir/,STOPPED} 2024-12-10T11:01:19,710 WARN [BP-475784458-172.17.0.2-1733828470508 heartbeating to localhost/127.0.0.1:40209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T11:01:19,710 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T11:01:19,710 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T11:01:19,710 WARN [BP-475784458-172.17.0.2-1733828470508 heartbeating to localhost/127.0.0.1:40209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-475784458-172.17.0.2-1733828470508 (Datanode Uuid 4ae71281-d36b-49c9-a42e-a3f675ff9a7b) service to localhost/127.0.0.1:40209 2024-12-10T11:01:19,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data5/current/BP-475784458-172.17.0.2-1733828470508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:01:19,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data6/current/BP-475784458-172.17.0.2-1733828470508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:01:19,713 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T11:01:19,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:01:19,720 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T11:01:19,720 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T11:01:19,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T11:01:19,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir/,STOPPED} 2024-12-10T11:01:19,722 WARN [BP-475784458-172.17.0.2-1733828470508 heartbeating to localhost/127.0.0.1:40209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T11:01:19,722 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T11:01:19,722 WARN [BP-475784458-172.17.0.2-1733828470508 heartbeating to localhost/127.0.0.1:40209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-475784458-172.17.0.2-1733828470508 (Datanode Uuid a9b8a64d-fbbd-4134-9b04-4d7295a4be94) service to localhost/127.0.0.1:40209 2024-12-10T11:01:19,722 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T11:01:19,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data3/current/BP-475784458-172.17.0.2-1733828470508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:01:19,723 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data4/current/BP-475784458-172.17.0.2-1733828470508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:01:19,723 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T11:01:19,734 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:01:19,735 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T11:01:19,735 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T11:01:19,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T11:01:19,736 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir/,STOPPED} 2024-12-10T11:01:19,738 WARN [BP-475784458-172.17.0.2-1733828470508 heartbeating to localhost/127.0.0.1:40209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T11:01:19,738 WARN [BP-475784458-172.17.0.2-1733828470508 heartbeating to localhost/127.0.0.1:40209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-475784458-172.17.0.2-1733828470508 (Datanode Uuid 0bf4b057-e912-466f-be95-ded1180f5da9) service to localhost/127.0.0.1:40209 2024-12-10T11:01:19,738 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data1/current/BP-475784458-172.17.0.2-1733828470508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:01:19,739 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/cluster_c03f5107-4965-69c3-bb8c-f93d8b2223ba/data/data2/current/BP-475784458-172.17.0.2-1733828470508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:01:19,739 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-10T11:01:19,739 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T11:01:19,739 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T11:01:19,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T11:01:19,752 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T11:01:19,752 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T11:01:19,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T11:01:19,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir/,STOPPED} 2024-12-10T11:01:19,762 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-10T11:01:19,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-10T11:01:19,824 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=85 (was 158), OpenFileDescriptor=447 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=989 (was 1014), ProcessCount=11 (was 11), AvailableMemoryMB=4461 (was 4804) 2024-12-10T11:01:19,833 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=85, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=989, ProcessCount=11, AvailableMemoryMB=4461 2024-12-10T11:01:19,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-10T11:01:19,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.log.dir so I do NOT create it in target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f 2024-12-10T11:01:19,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e92f3157-dfe4-7b25-a806-daeb8fadda19/hadoop.tmp.dir so I do NOT create it in target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f 2024-12-10T11:01:19,834 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35, deleteOnExit=true 2024-12-10T11:01:19,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-10T11:01:19,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/test.cache.data in system properties and HBase conf 2024-12-10T11:01:19,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.tmp.dir in system properties and HBase conf 2024-12-10T11:01:19,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.log.dir in system properties and HBase conf 2024-12-10T11:01:19,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-10T11:01:19,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-10T11:01:19,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-10T11:01:19,835 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-10T11:01:19,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T11:01:19,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T11:01:19,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T11:01:19,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T11:01:19,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T11:01:19,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T11:01:19,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T11:01:19,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T11:01:19,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T11:01:19,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/nfs.dump.dir in system properties and HBase conf 2024-12-10T11:01:19,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/java.io.tmpdir in system properties and HBase conf 2024-12-10T11:01:19,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T11:01:19,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T11:01:19,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T11:01:19,952 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:01:19,959 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:01:19,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:01:19,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:01:19,963 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T11:01:19,964 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:01:19,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2baf02db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:01:19,965 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18ee857{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:01:20,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a4689e1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/java.io.tmpdir/jetty-localhost-43657-hadoop-hdfs-3_4_1-tests_jar-_-any-2659876172028124467/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T11:01:20,110 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@53c85678{HTTP/1.1, (http/1.1)}{localhost:43657} 2024-12-10T11:01:20,110 INFO [Time-limited test {}] server.Server(415): Started @12014ms 2024-12-10T11:01:20,226 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:01:20,233 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:01:20,237 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:01:20,237 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:01:20,238 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T11:01:20,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1841cfc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:01:20,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d1c78c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:01:20,382 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@760f4a1c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/java.io.tmpdir/jetty-localhost-38893-hadoop-hdfs-3_4_1-tests_jar-_-any-2108721390777622996/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:01:20,382 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@e494f88{HTTP/1.1, (http/1.1)}{localhost:38893} 2024-12-10T11:01:20,382 INFO [Time-limited test {}] server.Server(415): Started @12286ms 2024-12-10T11:01:20,385 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T11:01:20,433 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:01:20,436 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:01:20,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:01:20,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:01:20,438 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T11:01:20,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55fefad1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:01:20,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@564d8641{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:01:20,492 WARN [Thread-526 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data1/current/BP-527819418-172.17.0.2-1733828479876/current, will proceed with Du for space computation calculation, 2024-12-10T11:01:20,493 WARN [Thread-527 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data2/current/BP-527819418-172.17.0.2-1733828479876/current, will proceed with Du for space computation calculation, 2024-12-10T11:01:20,530 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T11:01:20,534 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd8f7fcb906ddd28 with lease ID 0x194d94097a8362e7: Processing first storage report for DS-d9b360cb-a010-459b-b054-dab07639952e from datanode DatanodeRegistration(127.0.0.1:46281, datanodeUuid=b2e29257-bd11-4237-b1d9-99f8f9926cb6, infoPort=35079, infoSecurePort=0, ipcPort=40371, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876) 2024-12-10T11:01:20,535 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd8f7fcb906ddd28 with lease ID 0x194d94097a8362e7: from storage DS-d9b360cb-a010-459b-b054-dab07639952e node DatanodeRegistration(127.0.0.1:46281, datanodeUuid=b2e29257-bd11-4237-b1d9-99f8f9926cb6, infoPort=35079, infoSecurePort=0, ipcPort=40371, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:01:20,535 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd8f7fcb906ddd28 with lease ID 0x194d94097a8362e7: Processing first storage report for DS-6def2018-2ef1-438f-a522-be8cdcf0e1e1 from datanode DatanodeRegistration(127.0.0.1:46281, datanodeUuid=b2e29257-bd11-4237-b1d9-99f8f9926cb6, infoPort=35079, infoSecurePort=0, ipcPort=40371, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876) 2024-12-10T11:01:20,535 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd8f7fcb906ddd28 with lease ID 0x194d94097a8362e7: from storage DS-6def2018-2ef1-438f-a522-be8cdcf0e1e1 node DatanodeRegistration(127.0.0.1:46281, datanodeUuid=b2e29257-bd11-4237-b1d9-99f8f9926cb6, infoPort=35079, infoSecurePort=0, ipcPort=40371, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:01:20,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25106b03{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/java.io.tmpdir/jetty-localhost-38783-hadoop-hdfs-3_4_1-tests_jar-_-any-10812255522629764671/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:01:20,583 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ae51624{HTTP/1.1, (http/1.1)}{localhost:38783} 2024-12-10T11:01:20,583 INFO [Time-limited test {}] server.Server(415): Started @12487ms 2024-12-10T11:01:20,585 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T11:01:20,641 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:01:20,650 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:01:20,658 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:01:20,658 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:01:20,658 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T11:01:20,659 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e56045e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:01:20,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@328032c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:01:20,713 WARN [Thread-561 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data3/current/BP-527819418-172.17.0.2-1733828479876/current, will proceed with Du for space computation calculation, 2024-12-10T11:01:20,713 WARN [Thread-562 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data4/current/BP-527819418-172.17.0.2-1733828479876/current, will proceed with Du for space computation calculation, 2024-12-10T11:01:20,738 WARN [Thread-541 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T11:01:20,741 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x32bff2fcb7cc6ef0 with lease ID 0x194d94097a8362e8: Processing first storage report for DS-cefcb21b-c98c-4a9b-89e1-65907c52e413 from datanode DatanodeRegistration(127.0.0.1:37069, datanodeUuid=2a03b13b-bfdc-4c6f-8a9c-914822753a8c, infoPort=35539, infoSecurePort=0, ipcPort=36801, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876) 2024-12-10T11:01:20,741 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32bff2fcb7cc6ef0 with lease ID 0x194d94097a8362e8: from storage DS-cefcb21b-c98c-4a9b-89e1-65907c52e413 node DatanodeRegistration(127.0.0.1:37069, datanodeUuid=2a03b13b-bfdc-4c6f-8a9c-914822753a8c, infoPort=35539, infoSecurePort=0, ipcPort=36801, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:01:20,741 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x32bff2fcb7cc6ef0 with lease ID 0x194d94097a8362e8: Processing first storage report for DS-49a2b306-aeb4-4fc4-b28e-f5485d73b2f8 from datanode DatanodeRegistration(127.0.0.1:37069, datanodeUuid=2a03b13b-bfdc-4c6f-8a9c-914822753a8c, infoPort=35539, infoSecurePort=0, ipcPort=36801, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876) 2024-12-10T11:01:20,741 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32bff2fcb7cc6ef0 with lease ID 0x194d94097a8362e8: from storage DS-49a2b306-aeb4-4fc4-b28e-f5485d73b2f8 node DatanodeRegistration(127.0.0.1:37069, datanodeUuid=2a03b13b-bfdc-4c6f-8a9c-914822753a8c, infoPort=35539, infoSecurePort=0, ipcPort=36801, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:01:20,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ff8a873{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/java.io.tmpdir/jetty-localhost-36013-hadoop-hdfs-3_4_1-tests_jar-_-any-9014466562301046033/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:01:20,793 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ec581d{HTTP/1.1, (http/1.1)}{localhost:36013} 2024-12-10T11:01:20,793 INFO [Time-limited test {}] server.Server(415): Started @12697ms 2024-12-10T11:01:20,796 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
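At this point the three datanodes requested by the StartMiniClusterOption printed earlier (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1) have all registered their storages with the namenode. A short sketch of how a test can request exactly that topology, assuming the builder API that the option's toString() above reflects:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class ClusterTopologySketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the option printed in the log: one master, three regionservers,
    // three datanodes and a single ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);
    try {
      // Test code would run against util.getConnection() here.
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```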
2024-12-10T11:01:20,917 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data5/current/BP-527819418-172.17.0.2-1733828479876/current, will proceed with Du for space computation calculation, 2024-12-10T11:01:20,922 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data6/current/BP-527819418-172.17.0.2-1733828479876/current, will proceed with Du for space computation calculation, 2024-12-10T11:01:20,965 WARN [Thread-576 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T11:01:20,968 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c75374994af504f with lease ID 0x194d94097a8362e9: Processing first storage report for DS-e04fa877-e16a-4a97-b49c-cd25b501241a from datanode DatanodeRegistration(127.0.0.1:39269, datanodeUuid=916dd368-7964-443b-8ee1-f72da2d6f5e5, infoPort=32901, infoSecurePort=0, ipcPort=45605, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876) 2024-12-10T11:01:20,969 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c75374994af504f with lease ID 0x194d94097a8362e9: from storage DS-e04fa877-e16a-4a97-b49c-cd25b501241a node DatanodeRegistration(127.0.0.1:39269, datanodeUuid=916dd368-7964-443b-8ee1-f72da2d6f5e5, infoPort=32901, infoSecurePort=0, ipcPort=45605, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:01:20,969 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c75374994af504f with lease ID 0x194d94097a8362e9: Processing first storage report for DS-2c1e81c7-9a10-47ad-9065-b390443ac0f8 from datanode DatanodeRegistration(127.0.0.1:39269, datanodeUuid=916dd368-7964-443b-8ee1-f72da2d6f5e5, infoPort=32901, infoSecurePort=0, ipcPort=45605, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876) 2024-12-10T11:01:20,969 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c75374994af504f with lease ID 0x194d94097a8362e9: from storage DS-2c1e81c7-9a10-47ad-9065-b390443ac0f8 node DatanodeRegistration(127.0.0.1:39269, datanodeUuid=916dd368-7964-443b-8ee1-f72da2d6f5e5, infoPort=32901, infoSecurePort=0, ipcPort=45605, storageInfo=lv=-57;cid=testClusterID;nsid=1593498877;c=1733828479876), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:01:21,037 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f 2024-12-10T11:01:21,041 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/zookeeper_0, clientPort=56759, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-10T11:01:21,042 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56759 2024-12-10T11:01:21,042 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,044 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741825_1001 (size=7) 2024-12-10T11:01:21,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741825_1001 (size=7) 2024-12-10T11:01:21,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741825_1001 (size=7) 2024-12-10T11:01:21,060 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686 with version=8 2024-12-10T11:01:21,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40209/user/jenkins/test-data/e654855e-5a55-bbee-b1c5-704a71ed038a/hbase-staging 2024-12-10T11:01:21,062 INFO [Time-limited test {}] client.ConnectionUtils(128): master/944a6b9062fa:0 server-side Connection retries=45 2024-12-10T11:01:21,062 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,063 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:01:21,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:01:21,063 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-10T11:01:21,063 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:01:21,064 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35283 2024-12-10T11:01:21,065 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35283 connecting to ZooKeeper ensemble=127.0.0.1:56759 2024-12-10T11:01:21,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352830x0, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:01:21,076 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35283-0x10176ce04ba0000 connected 2024-12-10T11:01:21,091 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,097 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:21,097 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686, hbase.cluster.distributed=false 2024-12-10T11:01:21,099 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:01:21,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35283 2024-12-10T11:01:21,105 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35283 2024-12-10T11:01:21,105 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35283 2024-12-10T11:01:21,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35283 2024-12-10T11:01:21,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35283 2024-12-10T11:01:21,125 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/944a6b9062fa:0 server-side Connection retries=45 2024-12-10T11:01:21,125 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,125 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,125 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:01:21,125 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,125 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:01:21,125 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:01:21,125 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:01:21,126 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39659 2024-12-10T11:01:21,128 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39659 connecting to ZooKeeper ensemble=127.0.0.1:56759 2024-12-10T11:01:21,129 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,132 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:396590x0, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:01:21,138 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:396590x0, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:21,138 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T11:01:21,138 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39659-0x10176ce04ba0001 connected 2024-12-10T11:01:21,139 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:01:21,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:01:21,141 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:01:21,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39659 2024-12-10T11:01:21,143 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39659 2024-12-10T11:01:21,143 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39659 2024-12-10T11:01:21,143 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39659 2024-12-10T11:01:21,144 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39659 2024-12-10T11:01:21,159 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/944a6b9062fa:0 server-side Connection retries=45 2024-12-10T11:01:21,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,159 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:01:21,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:01:21,160 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:01:21,160 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:01:21,161 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36563 2024-12-10T11:01:21,163 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36563 connecting to ZooKeeper ensemble=127.0.0.1:56759 2024-12-10T11:01:21,164 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,165 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:365630x0, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:01:21,175 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36563-0x10176ce04ba0002 connected 2024-12-10T11:01:21,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:21,176 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T11:01:21,177 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:01:21,178 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
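The startup records above show the MiniZooKeeperCluster listening on clientPort=56759 and the master and regionservers each connecting to the ensemble at 127.0.0.1:56759 before binding their Netty RPC servers. A hedged client-side sketch of pointing a plain HBase connection at that same ensemble (port taken from this run's log; the admin call is just an illustrative round trip, not part of the test):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Ensemble address from the MiniZooKeeperCluster record above; both values
    // are specific to this particular test run.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 56759);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Any admin RPC exercises the master endpoint bound by the
      // NettyRpcServer records above.
      for (TableName name : admin.listTableNames()) {
        System.out.println(name);
      }
    }
  }
}
```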
2024-12-10T11:01:21,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:01:21,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36563 2024-12-10T11:01:21,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36563 2024-12-10T11:01:21,185 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36563 2024-12-10T11:01:21,189 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36563 2024-12-10T11:01:21,190 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36563 2024-12-10T11:01:21,207 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/944a6b9062fa:0 server-side Connection retries=45 2024-12-10T11:01:21,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,208 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:01:21,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:01:21,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:01:21,208 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:01:21,208 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:01:21,209 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39637 2024-12-10T11:01:21,210 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39637 connecting to ZooKeeper ensemble=127.0.0.1:56759 2024-12-10T11:01:21,211 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,213 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:396370x0, quorum=127.0.0.1:56759, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:01:21,218 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39637-0x10176ce04ba0003 connected 2024-12-10T11:01:21,218 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:21,219 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T11:01:21,226 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:01:21,227 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:01:21,229 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:01:21,233 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39637 2024-12-10T11:01:21,233 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39637 2024-12-10T11:01:21,237 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39637 2024-12-10T11:01:21,238 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39637 2024-12-10T11:01:21,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39637 2024-12-10T11:01:21,253 DEBUG [M:0;944a6b9062fa:35283 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;944a6b9062fa:35283 2024-12-10T11:01:21,253 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/944a6b9062fa,35283,1733828481062 2024-12-10T11:01:21,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:21,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:21,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:21,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:21,257 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35283-0x10176ce04ba0000, 
quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/944a6b9062fa,35283,1733828481062 2024-12-10T11:01:21,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:01:21,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:01:21,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:01:21,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,262 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T11:01:21,263 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/944a6b9062fa,35283,1733828481062 from backup master directory 2024-12-10T11:01:21,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/944a6b9062fa,35283,1733828481062 2024-12-10T11:01:21,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:21,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:21,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:21,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, 
quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:01:21,266 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:01:21,266 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=944a6b9062fa,35283,1733828481062 2024-12-10T11:01:21,275 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/hbase.id] with ID: 6b9af954-a508-432d-97de-a33d93ce23fe 2024-12-10T11:01:21,275 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/.tmp/hbase.id 2024-12-10T11:01:21,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741826_1002 (size=42) 2024-12-10T11:01:21,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741826_1002 (size=42) 2024-12-10T11:01:21,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741826_1002 (size=42) 2024-12-10T11:01:21,285 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/.tmp/hbase.id]:[hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/hbase.id] 2024-12-10T11:01:21,303 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:01:21,303 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-10T11:01:21,305 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
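The two FSUtils records above describe the cluster ID being written to a temporary location under .tmp and then moved to its final hbase.id path, so readers never observe a half-written file. A minimal sketch of that write-then-rename pattern on the Hadoop FileSystem API (the helper, paths and error handling are illustrative, not HBase's actual FSUtils code):

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishViaRenameSketch {
  // Writes content to a sibling .tmp path first, then renames it into place.
  static void publish(FileSystem fs, Path target, String content) throws IOException {
    Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {
      throw new IOException("Could not move " + tmp + " to " + target);
    }
  }

  public static void main(String[] args) throws Exception {
    // Local filesystem and an illustrative path; the ID string is the cluster
    // ID reported in the log above.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    publish(fs, new Path("/tmp/publish-sketch/hbase.id"),
        "6b9af954-a508-432d-97de-a33d93ce23fe");
  }
}
```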
2024-12-10T11:01:21,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741827_1003 (size=196) 2024-12-10T11:01:21,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741827_1003 (size=196) 2024-12-10T11:01:21,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741827_1003 (size=196) 2024-12-10T11:01:21,320 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T11:01:21,321 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-10T11:01:21,321 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:01:21,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is 
added to blk_1073741828_1004 (size=1189) 2024-12-10T11:01:21,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741828_1004 (size=1189) 2024-12-10T11:01:21,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741828_1004 (size=1189) 2024-12-10T11:01:21,336 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store 2024-12-10T11:01:21,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741829_1005 (size=34) 2024-12-10T11:01:21,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741829_1005 (size=34) 2024-12-10T11:01:21,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741829_1005 (size=34) 2024-12-10T11:01:21,347 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:21,347 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T11:01:21,347 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:21,347 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
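The descriptor dumped in the two records above lists the column families of the internal 'master:store' region (info, proc, rs, state) together with their attributes. Purely as a sketch of what those attributes correspond to in HBase's public client API (this is not how MasterRegion builds its descriptor internally, and the table name below is a stand-in, not the reserved 'master' namespace):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes printed in the log:
    // VERSIONS=3, ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // 'proc' (and likewise 'rs' and 'state') use the other profile shown in the log:
    // 1 version, ROW bloom filter, 64 KB blocks, not in-memory.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store"))  // placeholder name
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
```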
2024-12-10T11:01:21,347 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T11:01:21,347 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:21,347 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:01:21,347 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733828481347Disabling compacts and flushes for region at 1733828481347Disabling writes for close at 1733828481347Writing region close event to WAL at 1733828481347Closed at 1733828481347 2024-12-10T11:01:21,348 WARN [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/.initializing 2024-12-10T11:01:21,348 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/WALs/944a6b9062fa,35283,1733828481062 2024-12-10T11:01:21,353 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C35283%2C1733828481062, suffix=, logDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/WALs/944a6b9062fa,35283,1733828481062, archiveDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/oldWALs, maxLogs=10 2024-12-10T11:01:21,354 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 944a6b9062fa%2C35283%2C1733828481062.1733828481353 2024-12-10T11:01:21,363 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/WALs/944a6b9062fa,35283,1733828481062/944a6b9062fa%2C35283%2C1733828481062.1733828481353 2024-12-10T11:01:21,365 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35539:35539),(127.0.0.1/127.0.0.1:32901:32901),(127.0.0.1/127.0.0.1:35079:35079)] 2024-12-10T11:01:21,367 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:01:21,367 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:21,367 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,367 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T11:01:21,376 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:21,377 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:21,377 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T11:01:21,379 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:21,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:01:21,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T11:01:21,383 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:21,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:01:21,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T11:01:21,387 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:21,388 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:01:21,388 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,389 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,389 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,391 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,391 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,392 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T11:01:21,393 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:01:21,396 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:01:21,397 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75067674, jitterRate=0.11859551072120667}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:01:21,398 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733828481368Initializing all the Stores at 1733828481369 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828481369Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828481373 (+4 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828481373Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828481373Cleaning up temporary data from old regions at 1733828481391 (+18 ms)Region opened successfully at 1733828481397 (+6 ms) 2024-12-10T11:01:21,398 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T11:01:21,403 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e5c76a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=944a6b9062fa/172.17.0.2:0 2024-12-10T11:01:21,404 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-10T11:01:21,405 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T11:01:21,405 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T11:01:21,405 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T11:01:21,405 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-10T11:01:21,406 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-10T11:01:21,406 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T11:01:21,409 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
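Among the records above, ProcedureExecutor reports starting 5 core workers with a burst ceiling of 50. HBase's procedure executor is its own implementation; the following is only a loose JDK analogy of that "small core pool, larger burst limit" sizing (the numbers are copied from the log, everything else is hypothetical):

```java
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class WorkerPoolSketch {
  public static ThreadPoolExecutor newBurstPool() {
    // 5 long-lived workers that can grow to 50 under load and shrink back after 60s idle.
    // A SynchronousQueue hands tasks straight to threads, so the pool really does grow
    // toward the maximum; with an unbounded queue it would never exceed the core size.
    return new ThreadPoolExecutor(
        5, 50, 60L, TimeUnit.SECONDS,
        new SynchronousQueue<>(),
        new ThreadPoolExecutor.CallerRunsPolicy());  // run in the caller instead of rejecting at the cap
  }
}
```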
2024-12-10T11:01:21,410 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T11:01:21,412 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-10T11:01:21,412 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T11:01:21,413 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T11:01:21,414 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-10T11:01:21,414 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T11:01:21,415 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T11:01:21,417 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-10T11:01:21,418 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T11:01:21,420 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T11:01:21,423 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T11:01:21,424 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T11:01:21,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:21,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:21,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:21,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-10T11:01:21,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:21,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,428 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=944a6b9062fa,35283,1733828481062, sessionid=0x10176ce04ba0000, setting cluster-up flag (Was=false) 2024-12-10T11:01:21,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,437 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T11:01:21,439 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=944a6b9062fa,35283,1733828481062 2024-12-10T11:01:21,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,448 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T11:01:21,449 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=944a6b9062fa,35283,1733828481062 2024-12-10T11:01:21,451 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-10T11:01:21,453 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-10T11:01:21,453 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-10T11:01:21,454 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
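The ZKUtil records in the span above probe optional znodes such as /hbase/balancer, /hbase/normalizer and /hbase/switch/split, and treat a missing node as a normal outcome ("not necessarily an error"). A minimal sketch of that tolerant read with the plain ZooKeeper client, assuming the quorum address from the log; the helper itself is hypothetical:

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class OptionalZNodeSketch {
  // Returns the znode's data, or null when the node simply does not exist yet.
  static byte[] readIfPresent(ZooKeeper zk, String path)
      throws KeeperException, InterruptedException {
    try {
      return zk.getData(path, false, null);        // no watch, no Stat needed for this sketch
    } catch (KeeperException.NoNodeException e) {
      return null;                                 // absence is expected, not an error
    }
  }

  public static void main(String[] args) throws Exception {
    Watcher noop = event -> { };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56759", 30_000, noop);   // quorum from the log above
    byte[] data = readIfPresent(zk, "/hbase/balancer");
    System.out.println(data == null ? "/hbase/balancer not set" : "/hbase/balancer present");
    zk.close();
  }
}
```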
2024-12-10T11:01:21,454 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 944a6b9062fa,35283,1733828481062 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T11:01:21,455 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/944a6b9062fa:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:01:21,455 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/944a6b9062fa:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:01:21,455 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/944a6b9062fa:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:01:21,455 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/944a6b9062fa:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:01:21,455 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/944a6b9062fa:0, corePoolSize=10, maxPoolSize=10 2024-12-10T11:01:21,455 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,455 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/944a6b9062fa:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:01:21,456 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,456 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733828511456 2024-12-10T11:01:21,456 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T11:01:21,457 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T11:01:21,457 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T11:01:21,457 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T11:01:21,457 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T11:01:21,457 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T11:01:21,457 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,458 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:01:21,458 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-10T11:01:21,459 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:21,459 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T11:01:21,466 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T11:01:21,466 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T11:01:21,466 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T11:01:21,468 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T11:01:21,468 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T11:01:21,471 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.large.0-1733828481468,5,FailOnTimeoutGroup] 2024-12-10T11:01:21,472 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.small.0-1733828481471,5,FailOnTimeoutGroup] 2024-12-10T11:01:21,472 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,472 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T11:01:21,472 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,472 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741831_1007 (size=1321) 2024-12-10T11:01:21,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741831_1007 (size=1321) 2024-12-10T11:01:21,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741831_1007 (size=1321) 2024-12-10T11:01:21,482 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-10T11:01:21,482 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686 2024-12-10T11:01:21,491 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741832_1008 (size=32) 2024-12-10T11:01:21,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741832_1008 (size=32) 2024-12-10T11:01:21,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741832_1008 (size=32) 2024-12-10T11:01:21,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:21,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T11:01:21,496 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T11:01:21,496 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:21,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:21,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T11:01:21,499 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T11:01:21,499 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
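The ChoreService records earlier in this span (for example LogsCleaner with period=600000 ms, and the HFileCleaner chore enabled just after it) describe periodic background cleaners. As a loose JDK-only analogy of such a fixed-delay chore (the task body is a placeholder, not HBase's cleaner logic):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
    // Run a cleanup pass every 600000 ms (the LogsCleaner period in the log),
    // waiting for one pass to finish before starting the next delay.
    chorePool.scheduleWithFixedDelay(
        () -> System.out.println("scan oldWALs and delete expired files (placeholder)"),
        0L, 600_000L, TimeUnit.MILLISECONDS);
  }
}
```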
2024-12-10T11:01:21,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:21,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T11:01:21,501 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T11:01:21,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:21,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:21,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T11:01:21,503 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T11:01:21,504 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:21,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:21,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T11:01:21,505 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740 2024-12-10T11:01:21,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740 2024-12-10T11:01:21,507 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T11:01:21,507 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T11:01:21,508 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T11:01:21,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T11:01:21,512 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:01:21,512 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73276246, jitterRate=0.09190115332603455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:01:21,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733828481493Initializing all the Stores at 1733828481494 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828481494Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828481494Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828481494Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828481494Cleaning up temporary data from old regions at 1733828481507 (+13 ms)Region opened successfully at 1733828481513 (+6 ms) 2024-12-10T11:01:21,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
2024-12-10T11:01:21,513 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T11:01:21,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T11:01:21,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T11:01:21,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T11:01:21,514 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T11:01:21,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733828481513Disabling compacts and flushes for region at 1733828481513Disabling writes for close at 1733828481513Writing region close event to WAL at 1733828481514 (+1 ms)Closed at 1733828481514 2024-12-10T11:01:21,516 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:01:21,516 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-10T11:01:21,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T11:01:21,518 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T11:01:21,520 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T11:01:21,542 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(746): ClusterId : 6b9af954-a508-432d-97de-a33d93ce23fe 2024-12-10T11:01:21,542 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(746): ClusterId : 6b9af954-a508-432d-97de-a33d93ce23fe 2024-12-10T11:01:21,542 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(746): ClusterId : 6b9af954-a508-432d-97de-a33d93ce23fe 2024-12-10T11:01:21,542 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:01:21,542 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:01:21,542 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:01:21,546 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:01:21,546 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:01:21,547 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:01:21,547 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-12-10T11:01:21,549 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:01:21,549 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:01:21,549 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:01:21,550 DEBUG [RS:2;944a6b9062fa:39637 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c5268f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=944a6b9062fa/172.17.0.2:0 2024-12-10T11:01:21,551 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:01:21,551 DEBUG [RS:1;944a6b9062fa:36563 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b5c6999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=944a6b9062fa/172.17.0.2:0 2024-12-10T11:01:21,554 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:01:21,554 DEBUG [RS:0;944a6b9062fa:39659 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d8a2900, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=944a6b9062fa/172.17.0.2:0 2024-12-10T11:01:21,564 DEBUG [RS:1;944a6b9062fa:36563 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;944a6b9062fa:36563 2024-12-10T11:01:21,564 DEBUG [RS:2;944a6b9062fa:39637 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;944a6b9062fa:39637 2024-12-10T11:01:21,564 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:01:21,564 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:01:21,564 DEBUG [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T11:01:21,564 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:01:21,564 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:01:21,565 DEBUG [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-10T11:01:21,565 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(2659): reportForDuty to master=944a6b9062fa,35283,1733828481062 with port=39637, startcode=1733828481207 2024-12-10T11:01:21,565 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(2659): reportForDuty to master=944a6b9062fa,35283,1733828481062 with port=36563, startcode=1733828481159 2024-12-10T11:01:21,566 DEBUG [RS:2;944a6b9062fa:39637 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:01:21,566 DEBUG [RS:1;944a6b9062fa:36563 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:01:21,568 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48983, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:01:21,569 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35283 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 944a6b9062fa,36563,1733828481159 2024-12-10T11:01:21,569 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35283 {}] master.ServerManager(517): Registering regionserver=944a6b9062fa,36563,1733828481159 2024-12-10T11:01:21,570 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46049, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:01:21,572 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35283 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 944a6b9062fa,39637,1733828481207 2024-12-10T11:01:21,572 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35283 {}] master.ServerManager(517): Registering regionserver=944a6b9062fa,39637,1733828481207 2024-12-10T11:01:21,572 DEBUG [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686 2024-12-10T11:01:21,572 DEBUG [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34775 2024-12-10T11:01:21,572 DEBUG [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:01:21,574 DEBUG [RS:0;944a6b9062fa:39659 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;944a6b9062fa:39659 2024-12-10T11:01:21,574 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:01:21,574 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:01:21,574 DEBUG [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(832): About to register with Master. 
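Annotation: in the reportForDuty and "Registering regionserver" entries above, each region server is identified by the triple host,port,startcode, for example 944a6b9062fa,36563,1733828481159. The short sketch below only splits such a string to show that layout; it is not the HBase ServerName API.

    // Splits a "host,port,startcode" identifier as printed in the registration entries above.
    public final class ServerIdSketch {
        public static void main(String[] args) {
            String id = "944a6b9062fa,36563,1733828481159";   // copied from the log above
            String[] parts = id.split(",");
            String host = parts[0];
            int port = Integer.parseInt(parts[1]);             // RPC port
            long startcode = Long.parseLong(parts[2]);         // server start time (epoch ms)
            System.out.println(host + ":" + port + " started at " + startcode);
        }
    }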
2024-12-10T11:01:21,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:01:21,575 DEBUG [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686 2024-12-10T11:01:21,575 DEBUG [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34775 2024-12-10T11:01:21,575 DEBUG [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:01:21,575 DEBUG [RS:1;944a6b9062fa:36563 {}] zookeeper.ZKUtil(111): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/944a6b9062fa,36563,1733828481159 2024-12-10T11:01:21,575 WARN [RS:1;944a6b9062fa:36563 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:01:21,575 INFO [RS:1;944a6b9062fa:36563 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:01:21,575 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(2659): reportForDuty to master=944a6b9062fa,35283,1733828481062 with port=39659, startcode=1733828481125 2024-12-10T11:01:21,575 DEBUG [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,36563,1733828481159 2024-12-10T11:01:21,576 DEBUG [RS:0;944a6b9062fa:39659 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:01:21,576 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [944a6b9062fa,36563,1733828481159] 2024-12-10T11:01:21,577 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57589, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:01:21,578 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35283 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 944a6b9062fa,39659,1733828481125 2024-12-10T11:01:21,578 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35283 {}] master.ServerManager(517): Registering regionserver=944a6b9062fa,39659,1733828481125 2024-12-10T11:01:21,580 DEBUG [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686 2024-12-10T11:01:21,580 DEBUG [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34775 2024-12-10T11:01:21,580 DEBUG [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:01:21,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:01:21,584 DEBUG [RS:2;944a6b9062fa:39637 {}] zookeeper.ZKUtil(111): 
regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/944a6b9062fa,39637,1733828481207 2024-12-10T11:01:21,584 WARN [RS:2;944a6b9062fa:39637 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:01:21,584 INFO [RS:2;944a6b9062fa:39637 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:01:21,584 DEBUG [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,39637,1733828481207 2024-12-10T11:01:21,584 DEBUG [RS:0;944a6b9062fa:39659 {}] zookeeper.ZKUtil(111): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/944a6b9062fa,39659,1733828481125 2024-12-10T11:01:21,585 WARN [RS:0;944a6b9062fa:39659 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:01:21,585 INFO [RS:0;944a6b9062fa:39659 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:01:21,585 DEBUG [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,39659,1733828481125 2024-12-10T11:01:21,585 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [944a6b9062fa,39659,1733828481125] 2024-12-10T11:01:21,585 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [944a6b9062fa,39637,1733828481207] 2024-12-10T11:01:21,586 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:01:21,591 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:01:21,597 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:01:21,597 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:01:21,600 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:01:21,601 INFO [RS:1;944a6b9062fa:36563 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:01:21,601 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-10T11:01:21,605 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:01:21,605 INFO [RS:2;944a6b9062fa:39637 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:01:21,605 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,606 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:01:21,606 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:01:21,606 INFO [RS:0;944a6b9062fa:39659 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:01:21,606 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,606 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:01:21,606 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
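Annotation: the CompactionChecker entries above report their interval in ISO-8601 duration form, PT1S, i.e. one second, which matches the period=1000, unit=MILLISECONDS chore registrations further down. A one-line check of that correspondence with the standard java.time API:

    import java.time.Duration;

    // PT1S from the CompactionChecker entries above is an ISO-8601 duration of one second.
    public final class ChoreIntervalSketch {
        public static void main(String[] args) {
            System.out.println(Duration.parse("PT1S").toMillis()); // 1000 ms
        }
    }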
2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/944a6b9062fa:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:01:21,607 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:21,607 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,607 DEBUG [RS:1;944a6b9062fa:36563 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:21,607 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/944a6b9062fa:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:01:21,608 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,608 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,608 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,608 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,608 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,608 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,608 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:21,609 DEBUG [RS:2;944a6b9062fa:39637 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:21,609 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:01:21,610 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:01:21,610 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,610 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,610 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,610 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,610 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,610 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,610 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/944a6b9062fa:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:01:21,610 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,610 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,611 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 
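Annotation: the executor.ExecutorService entries above and below start one bounded thread pool per handler type, each with an explicit corePoolSize and maxPoolSize (1/1 for most, 2/2 for RS_LOG_REPLAY_OPS, 3/3 for RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS). The sketch below builds a plain java.util.concurrent pool with the same sizing; it is an analogy for those two parameters only, not the region server's own executor implementation.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // A bounded pool sized like the RS_SNAPSHOT_OPERATIONS entries above
    // (corePoolSize=3, maxPoolSize=3). Analogy only.
    public final class BoundedPoolSketch {
        public static void main(String[] args) throws InterruptedException {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    3, 3,                      // corePoolSize, maximumPoolSize as in the log
                    60L, TimeUnit.SECONDS,     // keep-alive for threads above core (unused at 3/3)
                    new LinkedBlockingQueue<>());
            pool.execute(() -> System.out.println("handler task"));
            pool.shutdown();
            pool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }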
2024-12-10T11:01:21,611 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,611 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,611 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/944a6b9062fa:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:01:21,611 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:21,611 DEBUG [RS:0;944a6b9062fa:39659 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:01:21,617 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,36563,1733828481159-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:01:21,617 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-10T11:01:21,617 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,618 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,618 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39637,1733828481207-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:01:21,618 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39659,1733828481125-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:01:21,635 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:01:21,635 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39637,1733828481207-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,635 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,635 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.Replication(171): 944a6b9062fa,39637,1733828481207 started 2024-12-10T11:01:21,637 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:01:21,637 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,39659,1733828481125-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,637 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,637 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.Replication(171): 944a6b9062fa,39659,1733828481125 started 2024-12-10T11:01:21,640 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:01:21,641 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,36563,1733828481159-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,641 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:21,641 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.Replication(171): 944a6b9062fa,36563,1733828481159 started 2024-12-10T11:01:21,650 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T11:01:21,650 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(1482): Serving as 944a6b9062fa,39637,1733828481207, RpcServer on 944a6b9062fa/172.17.0.2:39637, sessionid=0x10176ce04ba0003 2024-12-10T11:01:21,651 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:01:21,651 DEBUG [RS:2;944a6b9062fa:39637 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 944a6b9062fa,39637,1733828481207 2024-12-10T11:01:21,651 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,39637,1733828481207' 2024-12-10T11:01:21,651 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:01:21,651 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:01:21,652 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:01:21,652 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:01:21,652 DEBUG [RS:2;944a6b9062fa:39637 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 944a6b9062fa,39637,1733828481207 2024-12-10T11:01:21,652 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,39637,1733828481207' 2024-12-10T11:01:21,652 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:01:21,653 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:01:21,654 DEBUG [RS:2;944a6b9062fa:39637 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:01:21,654 INFO [RS:2;944a6b9062fa:39637 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:01:21,654 INFO [RS:2;944a6b9062fa:39637 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:01:21,657 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T11:01:21,657 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(1482): Serving as 944a6b9062fa,39659,1733828481125, RpcServer on 944a6b9062fa/172.17.0.2:39659, sessionid=0x10176ce04ba0001 2024-12-10T11:01:21,657 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:01:21,657 DEBUG [RS:0;944a6b9062fa:39659 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 944a6b9062fa,39659,1733828481125 2024-12-10T11:01:21,657 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,39659,1733828481125' 2024-12-10T11:01:21,657 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:01:21,658 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:01:21,658 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:01:21,658 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:01:21,658 DEBUG [RS:0;944a6b9062fa:39659 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 944a6b9062fa,39659,1733828481125 2024-12-10T11:01:21,658 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,39659,1733828481125' 2024-12-10T11:01:21,659 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:01:21,659 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:01:21,660 DEBUG [RS:0;944a6b9062fa:39659 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:01:21,660 INFO [RS:0;944a6b9062fa:39659 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:01:21,660 INFO [RS:0;944a6b9062fa:39659 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:01:21,662 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T11:01:21,663 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(1482): Serving as 944a6b9062fa,36563,1733828481159, RpcServer on 944a6b9062fa/172.17.0.2:36563, sessionid=0x10176ce04ba0002 2024-12-10T11:01:21,663 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:01:21,663 DEBUG [RS:1;944a6b9062fa:36563 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 944a6b9062fa,36563,1733828481159 2024-12-10T11:01:21,663 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,36563,1733828481159' 2024-12-10T11:01:21,663 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:01:21,663 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:01:21,664 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:01:21,664 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:01:21,664 DEBUG [RS:1;944a6b9062fa:36563 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 944a6b9062fa,36563,1733828481159 2024-12-10T11:01:21,664 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '944a6b9062fa,36563,1733828481159' 2024-12-10T11:01:21,664 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:01:21,665 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:01:21,666 DEBUG [RS:1;944a6b9062fa:36563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:01:21,666 INFO [RS:1;944a6b9062fa:36563 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:01:21,666 INFO [RS:1;944a6b9062fa:36563 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:01:21,670 WARN [944a6b9062fa:35283 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-10T11:01:21,757 INFO [RS:2;944a6b9062fa:39637 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C39637%2C1733828481207, suffix=, logDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,39637,1733828481207, archiveDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/oldWALs, maxLogs=32 2024-12-10T11:01:21,760 INFO [RS:2;944a6b9062fa:39637 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 944a6b9062fa%2C39637%2C1733828481207.1733828481759 2024-12-10T11:01:21,763 INFO [RS:0;944a6b9062fa:39659 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C39659%2C1733828481125, suffix=, logDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,39659,1733828481125, archiveDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/oldWALs, maxLogs=32 2024-12-10T11:01:21,765 INFO [RS:0;944a6b9062fa:39659 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 944a6b9062fa%2C39659%2C1733828481125.1733828481765 2024-12-10T11:01:21,768 INFO [RS:1;944a6b9062fa:36563 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C36563%2C1733828481159, suffix=, logDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,36563,1733828481159, archiveDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/oldWALs, maxLogs=32 2024-12-10T11:01:21,769 INFO [RS:1;944a6b9062fa:36563 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 944a6b9062fa%2C36563%2C1733828481159.1733828481769 2024-12-10T11:01:21,778 INFO [RS:2;944a6b9062fa:39637 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,39637,1733828481207/944a6b9062fa%2C39637%2C1733828481207.1733828481759 2024-12-10T11:01:21,782 DEBUG [RS:2;944a6b9062fa:39637 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32901:32901),(127.0.0.1/127.0.0.1:35079:35079),(127.0.0.1/127.0.0.1:35539:35539)] 2024-12-10T11:01:21,783 INFO [RS:0;944a6b9062fa:39659 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,39659,1733828481125/944a6b9062fa%2C39659%2C1733828481125.1733828481765 2024-12-10T11:01:21,785 INFO [RS:1;944a6b9062fa:36563 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,36563,1733828481159/944a6b9062fa%2C36563%2C1733828481159.1733828481769 2024-12-10T11:01:21,785 DEBUG [RS:0;944a6b9062fa:39659 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32901:32901),(127.0.0.1/127.0.0.1:35079:35079),(127.0.0.1/127.0.0.1:35539:35539)] 2024-12-10T11:01:21,788 DEBUG [RS:1;944a6b9062fa:36563 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35539:35539),(127.0.0.1/127.0.0.1:35079:35079),(127.0.0.1/127.0.0.1:32901:32901)] 2024-12-10T11:01:21,921 DEBUG [944a6b9062fa:35283 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-10T11:01:21,921 DEBUG [944a6b9062fa:35283 {}] balancer.BalancerClusterState(204): Hosts are {944a6b9062fa=0} racks are {/default-rack=0} 2024-12-10T11:01:21,924 DEBUG [944a6b9062fa:35283 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T11:01:21,924 DEBUG [944a6b9062fa:35283 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T11:01:21,924 DEBUG [944a6b9062fa:35283 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T11:01:21,924 DEBUG [944a6b9062fa:35283 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T11:01:21,924 DEBUG [944a6b9062fa:35283 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T11:01:21,924 DEBUG [944a6b9062fa:35283 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T11:01:21,924 INFO [944a6b9062fa:35283 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T11:01:21,924 INFO [944a6b9062fa:35283 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T11:01:21,924 INFO [944a6b9062fa:35283 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T11:01:21,924 DEBUG [944a6b9062fa:35283 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T11:01:21,924 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=944a6b9062fa,39637,1733828481207 2024-12-10T11:01:21,927 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 944a6b9062fa,39637,1733828481207, state=OPENING 2024-12-10T11:01:21,928 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T11:01:21,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:21,931 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:21,931 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:21,931 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:21,931 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T11:01:21,931 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path 
/hbase/meta-region-server: CHANGED 2024-12-10T11:01:21,932 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=944a6b9062fa,39637,1733828481207}] 2024-12-10T11:01:22,087 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T11:01:22,088 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42695, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T11:01:22,095 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-10T11:01:22,095 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:01:22,098 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=944a6b9062fa%2C39637%2C1733828481207.meta, suffix=.meta, logDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,39637,1733828481207, archiveDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/oldWALs, maxLogs=32 2024-12-10T11:01:22,099 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 944a6b9062fa%2C39637%2C1733828481207.meta.1733828482099.meta 2024-12-10T11:01:22,114 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/WALs/944a6b9062fa,39637,1733828481207/944a6b9062fa%2C39637%2C1733828481207.meta.1733828482099.meta 2024-12-10T11:01:22,126 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35079:35079),(127.0.0.1/127.0.0.1:35539:35539),(127.0.0.1/127.0.0.1:32901:32901)] 2024-12-10T11:01:22,132 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:01:22,132 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T11:01:22,132 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T11:01:22,133 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
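Annotation: the wal.AbstractFSWAL entries above configure each new WAL with blocksize=256 MB and rollsize=128 MB, and name each file prefix.timestamp (for example 944a6b9062fa%2C39637%2C1733828481207.1733828481759). The sketch below only reproduces the 256 MB to 128 MB relation under the assumption of a 0.5 roll multiplier; the property name in the comment is quoted from memory and should be checked against this HBase version.

    // Roll size as a fraction of the WAL block size, matching the
    // "blocksize=256 MB, rollsize=128 MB" entries above. The 0.5 multiplier
    // (assumed hbase.regionserver.logroll.multiplier) is an assumption here.
    public final class WalRollSizeSketch {
        public static void main(String[] args) {
            long blockSizeBytes = 256L * 1024 * 1024;
            double rollMultiplier = 0.5;
            long rollSizeBytes = (long) (blockSizeBytes * rollMultiplier);
            System.out.println(rollSizeBytes / (1024 * 1024) + " MB"); // 128 MB
        }
    }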
2024-12-10T11:01:22,133 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T11:01:22,133 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:22,133 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-10T11:01:22,133 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-10T11:01:22,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T11:01:22,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T11:01:22,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:22,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:22,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T11:01:22,139 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T11:01:22,139 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:22,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:22,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T11:01:22,141 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T11:01:22,141 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:22,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:01:22,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T11:01:22,143 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T11:01:22,143 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:22,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
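Annotation: the compactions.CompactionConfiguration entries above list maxFilesToCompact:10 and a throttle point of 2684354560 bytes (2.5 GiB). If the commonly documented default of 2 x maxFilesToCompact x memstore flush size applies here, the logged numbers are consistent with a 128 MB flush size, as the sketch below shows; that formula and the flush size are assumptions, only the 10 and the 2684354560 come from the log.

    // Checks the assumed default: throttle point = 2 * maxFilesToCompact * memstore flush size.
    // 2 * 10 * 128 MiB = 2684354560 bytes, matching "throttle point 2684354560" above.
    public final class CompactionThrottleSketch {
        public static void main(String[] args) {
            int maxFilesToCompact = 10;                        // from the log above
            long memstoreFlushSizeBytes = 128L * 1024 * 1024;  // assumed default
            long throttlePoint = 2L * maxFilesToCompact * memstoreFlushSizeBytes;
            System.out.println(throttlePoint);                 // 2684354560
        }
    }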
2024-12-10T11:01:22,144 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T11:01:22,145 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740 2024-12-10T11:01:22,148 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740 2024-12-10T11:01:22,150 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T11:01:22,150 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T11:01:22,150 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T11:01:22,152 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T11:01:22,153 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67452366, jitterRate=0.005118578672409058}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:01:22,153 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-10T11:01:22,155 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733828482133Writing region info on filesystem at 1733828482133Initializing all the Stores at 1733828482135 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828482135Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828482135Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828482135Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733828482135Cleaning up temporary data from old regions at 1733828482150 (+15 ms)Running coprocessor post-open hooks at 1733828482154 (+4 ms)Region opened successfully at 1733828482155 (+1 ms) 2024-12-10T11:01:22,157 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733828482086 2024-12-10T11:01:22,161 DEBUG [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T11:01:22,161 INFO [RS_OPEN_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-10T11:01:22,162 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=944a6b9062fa,39637,1733828481207 2024-12-10T11:01:22,164 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 944a6b9062fa,39637,1733828481207, state=OPEN 2024-12-10T11:01:22,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:01:22,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:01:22,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:01:22,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:01:22,166 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:22,166 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=944a6b9062fa,39637,1733828481207 2024-12-10T11:01:22,166 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:22,166 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:22,166 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:01:22,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T11:01:22,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=944a6b9062fa,39637,1733828481207 in 235 msec 2024-12-10T11:01:22,176 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T11:01:22,176 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 655 msec 2024-12-10T11:01:22,178 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:01:22,178 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-10T11:01:22,180 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T11:01:22,180 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=944a6b9062fa,39637,1733828481207, seqNum=-1] 2024-12-10T11:01:22,180 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T11:01:22,183 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41475, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T11:01:22,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 737 msec 2024-12-10T11:01:22,192 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733828482191, completionTime=-1 2024-12-10T11:01:22,192 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-10T11:01:22,192 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-10T11:01:22,194 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-10T11:01:22,194 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733828542194 2024-12-10T11:01:22,194 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733828602194 2024-12-10T11:01:22,195 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-10T11:01:22,195 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,35283,1733828481062-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:22,195 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,35283,1733828481062-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:22,195 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,35283,1733828481062-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:22,195 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-944a6b9062fa:35283, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:22,195 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:22,198 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:22,201 DEBUG [master/944a6b9062fa:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-10T11:01:22,203 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.937sec 2024-12-10T11:01:22,204 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T11:01:22,204 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T11:01:22,204 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T11:01:22,204 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-10T11:01:22,204 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T11:01:22,204 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,35283,1733828481062-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-10T11:01:22,204 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,35283,1733828481062-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T11:01:22,208 DEBUG [master/944a6b9062fa:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-10T11:01:22,208 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T11:01:22,208 INFO [master/944a6b9062fa:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=944a6b9062fa,35283,1733828481062-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:01:22,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d5d2159, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:01:22,243 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 944a6b9062fa,35283,-1 for getting cluster id 2024-12-10T11:01:22,243 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-10T11:01:22,245 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6b9af954-a508-432d-97de-a33d93ce23fe' 2024-12-10T11:01:22,245 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-10T11:01:22,246 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6b9af954-a508-432d-97de-a33d93ce23fe" 2024-12-10T11:01:22,246 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3297119e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:01:22,246 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [944a6b9062fa,35283,-1] 2024-12-10T11:01:22,246 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-10T11:01:22,247 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:22,248 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39352, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-10T11:01:22,249 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f519320, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:01:22,250 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T11:01:22,251 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=944a6b9062fa,39637,1733828481207, seqNum=-1] 2024-12-10T11:01:22,252 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T11:01:22,254 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44428, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T11:01:22,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=944a6b9062fa,35283,1733828481062 2024-12-10T11:01:22,257 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-10T11:01:22,258 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 944a6b9062fa,35283,1733828481062 2024-12-10T11:01:22,258 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1b8d0d 2024-12-10T11:01:22,258 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T11:01:22,260 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39366, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T11:01:22,261 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T11:01:22,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-10T11:01:22,266 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T11:01:22,266 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:22,266 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-10T11:01:22,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:01:22,268 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T11:01:22,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741837_1013 (size=392) 
2024-12-10T11:01:22,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741837_1013 (size=392) 2024-12-10T11:01:22,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741837_1013 (size=392) 2024-12-10T11:01:22,282 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a9d72313cfaf22ef37e9982c9fa1b308, NAME => 'TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686 2024-12-10T11:01:22,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741838_1014 (size=51) 2024-12-10T11:01:22,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741838_1014 (size=51) 2024-12-10T11:01:22,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741838_1014 (size=51) 2024-12-10T11:01:22,302 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:22,303 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing a9d72313cfaf22ef37e9982c9fa1b308, disabling compactions & flushes 2024-12-10T11:01:22,303 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. 2024-12-10T11:01:22,303 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. 2024-12-10T11:01:22,303 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. after waiting 0 ms 2024-12-10T11:01:22,303 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. 2024-12-10T11:01:22,303 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. 
2024-12-10T11:01:22,303 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for a9d72313cfaf22ef37e9982c9fa1b308: Waiting for close lock at 1733828482303Disabling compacts and flushes for region at 1733828482303Disabling writes for close at 1733828482303Writing region close event to WAL at 1733828482303Closed at 1733828482303 2024-12-10T11:01:22,305 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T11:01:22,306 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733828482305"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733828482305"}]},"ts":"1733828482305"} 2024-12-10T11:01:22,309 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-10T11:01:22,311 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T11:01:22,311 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733828482311"}]},"ts":"1733828482311"} 2024-12-10T11:01:22,314 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-10T11:01:22,314 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {944a6b9062fa=0} racks are {/default-rack=0} 2024-12-10T11:01:22,315 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T11:01:22,315 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T11:01:22,316 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T11:01:22,316 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T11:01:22,316 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T11:01:22,316 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T11:01:22,316 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T11:01:22,316 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T11:01:22,316 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T11:01:22,316 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T11:01:22,316 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a9d72313cfaf22ef37e9982c9fa1b308, ASSIGN}] 2024-12-10T11:01:22,318 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a9d72313cfaf22ef37e9982c9fa1b308, ASSIGN 2024-12-10T11:01:22,320 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a9d72313cfaf22ef37e9982c9fa1b308, ASSIGN; state=OFFLINE, location=944a6b9062fa,36563,1733828481159; forceNewPlan=false, retain=false 2024-12-10T11:01:22,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:01:22,470 INFO [944a6b9062fa:35283 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-10T11:01:22,471 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a9d72313cfaf22ef37e9982c9fa1b308, regionState=OPENING, regionLocation=944a6b9062fa,36563,1733828481159 2024-12-10T11:01:22,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a9d72313cfaf22ef37e9982c9fa1b308, ASSIGN because future has completed 2024-12-10T11:01:22,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a9d72313cfaf22ef37e9982c9fa1b308, server=944a6b9062fa,36563,1733828481159}] 2024-12-10T11:01:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:01:22,631 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T11:01:22,633 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55779, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T11:01:22,639 INFO [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. 
2024-12-10T11:01:22,639 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a9d72313cfaf22ef37e9982c9fa1b308, NAME => 'TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:01:22,640 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,640 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:01:22,640 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,640 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,642 INFO [StoreOpener-a9d72313cfaf22ef37e9982c9fa1b308-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,644 INFO [StoreOpener-a9d72313cfaf22ef37e9982c9fa1b308-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a9d72313cfaf22ef37e9982c9fa1b308 columnFamilyName cf 2024-12-10T11:01:22,644 DEBUG [StoreOpener-a9d72313cfaf22ef37e9982c9fa1b308-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:01:22,644 INFO [StoreOpener-a9d72313cfaf22ef37e9982c9fa1b308-1 {}] regionserver.HStore(327): Store=a9d72313cfaf22ef37e9982c9fa1b308/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:01:22,645 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,646 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/default/TestHBaseWalOnEC/a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,646 DEBUG 
[RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/default/TestHBaseWalOnEC/a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,647 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,647 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,649 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,653 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/default/TestHBaseWalOnEC/a9d72313cfaf22ef37e9982c9fa1b308/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:01:22,653 INFO [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a9d72313cfaf22ef37e9982c9fa1b308; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62630768, jitterRate=-0.06672883033752441}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T11:01:22,653 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a9d72313cfaf22ef37e9982c9fa1b308 2024-12-10T11:01:22,654 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a9d72313cfaf22ef37e9982c9fa1b308: Running coprocessor pre-open hook at 1733828482640Writing region info on filesystem at 1733828482640Initializing all the Stores at 1733828482641 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733828482642 (+1 ms)Cleaning up temporary data from old regions at 1733828482647 (+5 ms)Running coprocessor post-open hooks at 1733828482653 (+6 ms)Region opened successfully at 1733828482654 (+1 ms) 2024-12-10T11:01:22,656 INFO [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308., pid=6, masterSystemTime=1733828482630 2024-12-10T11:01:22,660 DEBUG [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. 2024-12-10T11:01:22,660 INFO [RS_OPEN_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. 
2024-12-10T11:01:22,661 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a9d72313cfaf22ef37e9982c9fa1b308, regionState=OPEN, openSeqNum=2, regionLocation=944a6b9062fa,36563,1733828481159 2024-12-10T11:01:22,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a9d72313cfaf22ef37e9982c9fa1b308, server=944a6b9062fa,36563,1733828481159 because future has completed 2024-12-10T11:01:22,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T11:01:22,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a9d72313cfaf22ef37e9982c9fa1b308, server=944a6b9062fa,36563,1733828481159 in 191 msec 2024-12-10T11:01:22,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T11:01:22,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a9d72313cfaf22ef37e9982c9fa1b308, ASSIGN in 356 msec 2024-12-10T11:01:22,686 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T11:01:22,686 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733828482686"}]},"ts":"1733828482686"} 2024-12-10T11:01:22,692 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-10T11:01:22,693 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T11:01:22,695 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-10T11:01:22,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 431 msec 2024-12-10T11:01:22,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-10T11:01:22,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-10T11:01:22,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-10T11:01:22,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:01:22,895 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T11:01:22,895 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-10T11:01:22,895 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T11:01:22,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-10T11:01:22,899 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T11:01:22,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-10T11:01:22,902 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308., hostname=944a6b9062fa,36563,1733828481159, seqNum=2] 2024-12-10T11:01:22,902 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T11:01:22,905 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48430, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T11:01:22,908 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-10T11:01:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-10T11:01:22,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:01:22,911 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-10T11:01:22,912 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T11:01:22,912 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T11:01:23,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:01:23,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36563 {}] regionserver.RSRpcServices(3929): Executing remote procedure 
class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-10T11:01:23,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. 2024-12-10T11:01:23,068 INFO [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing a9d72313cfaf22ef37e9982c9fa1b308 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-10T11:01:23,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/default/TestHBaseWalOnEC/a9d72313cfaf22ef37e9982c9fa1b308/.tmp/cf/931738b007dc430690c9d72419df7702 is 36, key is row/cf:cq/1733828482905/Put/seqid=0 2024-12-10T11:01:23,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741839_1015 (size=4787) 2024-12-10T11:01:23,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741839_1015 (size=4787) 2024-12-10T11:01:23,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741839_1015 (size=4787) 2024-12-10T11:01:23,096 INFO [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/default/TestHBaseWalOnEC/a9d72313cfaf22ef37e9982c9fa1b308/.tmp/cf/931738b007dc430690c9d72419df7702 2024-12-10T11:01:23,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/default/TestHBaseWalOnEC/a9d72313cfaf22ef37e9982c9fa1b308/.tmp/cf/931738b007dc430690c9d72419df7702 as hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/default/TestHBaseWalOnEC/a9d72313cfaf22ef37e9982c9fa1b308/cf/931738b007dc430690c9d72419df7702 2024-12-10T11:01:23,112 INFO [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/default/TestHBaseWalOnEC/a9d72313cfaf22ef37e9982c9fa1b308/cf/931738b007dc430690c9d72419df7702, entries=1, sequenceid=5, filesize=4.7 K 2024-12-10T11:01:23,113 INFO [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for a9d72313cfaf22ef37e9982c9fa1b308 in 45ms, sequenceid=5, compaction requested=false 2024-12-10T11:01:23,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for a9d72313cfaf22ef37e9982c9fa1b308: 2024-12-10T11:01:23,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. 2024-12-10T11:01:23,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/944a6b9062fa:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-10T11:01:23,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-10T11:01:23,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-10T11:01:23,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 204 msec 2024-12-10T11:01:23,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 213 msec 2024-12-10T11:01:23,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:01:23,225 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T11:01:23,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-10T11:01:23,231 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-10T11:01:23,231 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at 
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:01:23,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:23,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:23,231 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-10T11:01:23,232 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T11:01:23,232 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1204610823, stopped=false 2024-12-10T11:01:23,232 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=944a6b9062fa,35283,1733828481062 2024-12-10T11:01:23,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:23,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:23,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:23,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:01:23,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:23,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:23,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:23,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:01:23,234 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T11:01:23,235 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-10T11:01:23,235 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:23,235 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:23,235 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:01:23,235 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:23,235 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '944a6b9062fa,39659,1733828481125' ***** 2024-12-10T11:01:23,236 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:01:23,236 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '944a6b9062fa,36563,1733828481159' ***** 2024-12-10T11:01:23,236 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:01:23,236 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '944a6b9062fa,39637,1733828481207' ***** 2024-12-10T11:01:23,236 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:01:23,236 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:01:23,236 INFO [RS:0;944a6b9062fa:39659 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:01:23,236 INFO [RS:0;944a6b9062fa:39659 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:01:23,236 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(959): stopping server 944a6b9062fa,39659,1733828481125 2024-12-10T11:01:23,236 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:01:23,236 INFO [RS:0;944a6b9062fa:39659 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:01:23,236 INFO [RS:0;944a6b9062fa:39659 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;944a6b9062fa:39659. 
2024-12-10T11:01:23,236 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:01:23,236 DEBUG [RS:0;944a6b9062fa:39659 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:01:23,236 INFO [RS:2;944a6b9062fa:39637 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:01:23,237 INFO [RS:2;944a6b9062fa:39637 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:01:23,237 DEBUG [RS:0;944a6b9062fa:39659 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:01:23,237 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:01:23,237 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:01:23,237 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(959): stopping server 944a6b9062fa,39637,1733828481207 2024-12-10T11:01:23,237 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(976): stopping server 944a6b9062fa,39659,1733828481125; all regions closed. 2024-12-10T11:01:23,237 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:01:23,237 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:01:23,237 INFO [RS:2;944a6b9062fa:39637 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:01:23,237 INFO [RS:1;944a6b9062fa:36563 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:01:23,237 INFO [RS:1;944a6b9062fa:36563 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-10T11:01:23,237 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T11:01:23,237 INFO [RS:2;944a6b9062fa:39637 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;944a6b9062fa:39637.
2024-12-10T11:01:23,237 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(3091): Received CLOSE for a9d72313cfaf22ef37e9982c9fa1b308
2024-12-10T11:01:23,237 DEBUG [RS:2;944a6b9062fa:39637 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-10T11:01:23,237 DEBUG [RS:2;944a6b9062fa:39637 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T11:01:23,238 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-10T11:01:23,238 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-10T11:01:23,238 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-10T11:01:23,238 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-10T11:01:23,238 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,238 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,238 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,238 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(959): stopping server 944a6b9062fa,36563,1733828481159
2024-12-10T11:01:23,238 INFO [RS:1;944a6b9062fa:36563 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-10T11:01:23,238 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,238 INFO [RS:1;944a6b9062fa:36563 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;944a6b9062fa:36563.
2024-12-10T11:01:23,238 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a9d72313cfaf22ef37e9982c9fa1b308, disabling compactions & flushes
2024-12-10T11:01:23,239 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,239 DEBUG [RS:1;944a6b9062fa:36563 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-10T11:01:23,239 INFO [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.
2024-12-10T11:01:23,239 DEBUG [RS:1;944a6b9062fa:36563 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T11:01:23,239 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.
2024-12-10T11:01:23,239 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308. after waiting 0 ms
2024-12-10T11:01:23,239 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-10T11:01:23,239 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.
2024-12-10T11:01:23,239 DEBUG [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(1325): Online Regions={a9d72313cfaf22ef37e9982c9fa1b308=TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.}
2024-12-10T11:01:23,239 DEBUG [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(1351): Waiting on a9d72313cfaf22ef37e9982c9fa1b308
2024-12-10T11:01:23,241 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-10T11:01:23,241 DEBUG [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-12-10T11:01:23,241 DEBUG [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-10T11:01:23,241 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-10T11:01:23,242 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-10T11:01:23,242 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-10T11:01:23,242 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-10T11:01:23,242 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-10T11:01:23,242 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB
2024-12-10T11:01:23,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741834_1010 (size=93)
2024-12-10T11:01:23,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741834_1010 (size=93)
2024-12-10T11:01:23,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741834_1010 (size=93)
2024-12-10T11:01:23,249 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/default/TestHBaseWalOnEC/a9d72313cfaf22ef37e9982c9fa1b308/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-10T11:01:23,249 DEBUG [RS:0;944a6b9062fa:39659 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/oldWALs
2024-12-10T11:01:23,249 INFO [RS:0;944a6b9062fa:39659 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 944a6b9062fa%2C39659%2C1733828481125:(num 1733828481765)
2024-12-10T11:01:23,250 DEBUG [RS:0;944a6b9062fa:39659 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T11:01:23,250 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.LeaseManager(133): Closed leases
2024-12-10T11:01:23,250 INFO [RS:0;944a6b9062fa:39659 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-10T11:01:23,250 INFO [RS:0;944a6b9062fa:39659 {}] hbase.ChoreService(370): Chore service for: regionserver/944a6b9062fa:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-10T11:01:23,250 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-10T11:01:23,250 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-10T11:01:23,250 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-10T11:01:23,250 INFO [RS:0;944a6b9062fa:39659 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-10T11:01:23,250 INFO [RS:0;944a6b9062fa:39659 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39659
2024-12-10T11:01:23,251 INFO [regionserver/944a6b9062fa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-10T11:01:23,251 INFO [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.
2024-12-10T11:01:23,251 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a9d72313cfaf22ef37e9982c9fa1b308: Waiting for close lock at 1733828483238Running coprocessor pre-close hooks at 1733828483238Disabling compacts and flushes for region at 1733828483238Disabling writes for close at 1733828483239 (+1 ms)Writing region close event to WAL at 1733828483243 (+4 ms)Running coprocessor post-close hooks at 1733828483250 (+7 ms)Closed at 1733828483251 (+1 ms)
2024-12-10T11:01:23,251 DEBUG [RS_CLOSE_REGION-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308.
2024-12-10T11:01:23,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/944a6b9062fa,39659,1733828481125
2024-12-10T11:01:23,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-10T11:01:23,252 INFO [RS:0;944a6b9062fa:39659 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-10T11:01:23,254 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [944a6b9062fa,39659,1733828481125]
2024-12-10T11:01:23,256 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/944a6b9062fa,39659,1733828481125 already deleted, retry=false
2024-12-10T11:01:23,256 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 944a6b9062fa,39659,1733828481125 expired; onlineServers=2
2024-12-10T11:01:23,268 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/.tmp/info/3049054ce4684ac7b260836b257f0e48 is 153, key is TestHBaseWalOnEC,,1733828482261.a9d72313cfaf22ef37e9982c9fa1b308./info:regioninfo/1733828482661/Put/seqid=0
2024-12-10T11:01:23,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741840_1016 (size=6637)
2024-12-10T11:01:23,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741840_1016 (size=6637)
2024-12-10T11:01:23,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741840_1016 (size=6637)
2024-12-10T11:01:23,277 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/.tmp/info/3049054ce4684ac7b260836b257f0e48
2024-12-10T11:01:23,300 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/.tmp/ns/0d71c86ce8d9484b826bbaf9b3345c30 is 43, key is default/ns:d/1733828482183/Put/seqid=0
2024-12-10T11:01:23,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741841_1017 (size=5153)
2024-12-10T11:01:23,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741841_1017 (size=5153)
2024-12-10T11:01:23,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741841_1017 (size=5153)
2024-12-10T11:01:23,310 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/.tmp/ns/0d71c86ce8d9484b826bbaf9b3345c30
2024-12-10T11:01:23,319 INFO [regionserver/944a6b9062fa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-10T11:01:23,319 INFO [regionserver/944a6b9062fa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-10T11:01:23,319 INFO [regionserver/944a6b9062fa:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-10T11:01:23,339 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/.tmp/table/eca64052c64e40ba83125c008eb45352 is 52, key is TestHBaseWalOnEC/table:state/1733828482686/Put/seqid=0
2024-12-10T11:01:23,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741842_1018 (size=5249)
2024-12-10T11:01:23,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741842_1018 (size=5249)
2024-12-10T11:01:23,351 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/.tmp/table/eca64052c64e40ba83125c008eb45352
2024-12-10T11:01:23,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741842_1018 (size=5249)
2024-12-10T11:01:23,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:01:23,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39659-0x10176ce04ba0001, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:01:23,354 INFO [RS:0;944a6b9062fa:39659 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-10T11:01:23,354 INFO [RS:0;944a6b9062fa:39659 {}] regionserver.HRegionServer(1031): Exiting; stopping=944a6b9062fa,39659,1733828481125; zookeeper connection closed.
2024-12-10T11:01:23,355 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@435fae63 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@435fae63
2024-12-10T11:01:23,362 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/.tmp/info/3049054ce4684ac7b260836b257f0e48 as hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/info/3049054ce4684ac7b260836b257f0e48
2024-12-10T11:01:23,371 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/info/3049054ce4684ac7b260836b257f0e48, entries=10, sequenceid=11, filesize=6.5 K
2024-12-10T11:01:23,373 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/.tmp/ns/0d71c86ce8d9484b826bbaf9b3345c30 as hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/ns/0d71c86ce8d9484b826bbaf9b3345c30
2024-12-10T11:01:23,383 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/ns/0d71c86ce8d9484b826bbaf9b3345c30, entries=2, sequenceid=11, filesize=5.0 K
2024-12-10T11:01:23,385 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/.tmp/table/eca64052c64e40ba83125c008eb45352 as hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/table/eca64052c64e40ba83125c008eb45352
2024-12-10T11:01:23,392 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/table/eca64052c64e40ba83125c008eb45352, entries=2, sequenceid=11, filesize=5.1 K
2024-12-10T11:01:23,394 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 151ms, sequenceid=11, compaction requested=false
2024-12-10T11:01:23,400 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-10T11:01:23,400 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-10T11:01:23,401 INFO [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-10T11:01:23,401 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733828483241Running coprocessor pre-close hooks at 1733828483241Disabling compacts and flushes for region at 1733828483241Disabling writes for close at 1733828483242 (+1 ms)Obtaining lock to block concurrent updates at 1733828483242Preparing flush snapshotting stores in 1588230740 at 1733828483242Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733828483243 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733828483244 (+1 ms)Flushing 1588230740/info: creating writer at 1733828483245 (+1 ms)Flushing 1588230740/info: appending metadata at 1733828483268 (+23 ms)Flushing 1588230740/info: closing flushed file at 1733828483268Flushing 1588230740/ns: creating writer at 1733828483284 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733828483300 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733828483300Flushing 1588230740/table: creating writer at 1733828483320 (+20 ms)Flushing 1588230740/table: appending metadata at 1733828483338 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733828483338Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a0d8601: reopening flushed file at 1733828483360 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a53add: reopening flushed file at 1733828483372 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25744d44: reopening flushed file at 1733828483383 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 151ms, sequenceid=11, compaction requested=false at 1733828483394 (+11 ms)Writing region close event to WAL at 1733828483395 (+1 ms)Running coprocessor post-close hooks at 1733828483400 (+5 ms)Closed at 1733828483400
2024-12-10T11:01:23,401 DEBUG [RS_CLOSE_META-regionserver/944a6b9062fa:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-10T11:01:23,439 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(976): stopping server 944a6b9062fa,36563,1733828481159; all regions closed.
2024-12-10T11:01:23,440 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,440 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,440 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,440 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,440 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,441 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(976): stopping server 944a6b9062fa,39637,1733828481207; all regions closed.
2024-12-10T11:01:23,442 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,442 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,442 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,442 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,443 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741835_1011 (size=1298)
2024-12-10T11:01:23,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741835_1011 (size=1298)
2024-12-10T11:01:23,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741835_1011 (size=1298)
2024-12-10T11:01:23,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741836_1012 (size=2751)
2024-12-10T11:01:23,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741836_1012 (size=2751)
2024-12-10T11:01:23,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741836_1012 (size=2751)
2024-12-10T11:01:23,447 DEBUG [RS:1;944a6b9062fa:36563 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/oldWALs
2024-12-10T11:01:23,447 INFO [RS:1;944a6b9062fa:36563 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 944a6b9062fa%2C36563%2C1733828481159:(num 1733828481769)
2024-12-10T11:01:23,447 DEBUG [RS:1;944a6b9062fa:36563 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T11:01:23,448 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.LeaseManager(133): Closed leases
2024-12-10T11:01:23,448 INFO [RS:1;944a6b9062fa:36563 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-10T11:01:23,448 INFO [RS:1;944a6b9062fa:36563 {}] hbase.ChoreService(370): Chore service for: regionserver/944a6b9062fa:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-10T11:01:23,448 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-10T11:01:23,448 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-10T11:01:23,448 INFO [regionserver/944a6b9062fa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-10T11:01:23,448 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-10T11:01:23,448 INFO [RS:1;944a6b9062fa:36563 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-10T11:01:23,448 INFO [RS:1;944a6b9062fa:36563 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36563
2024-12-10T11:01:23,450 DEBUG [RS:2;944a6b9062fa:39637 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/oldWALs
2024-12-10T11:01:23,450 INFO [RS:2;944a6b9062fa:39637 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 944a6b9062fa%2C39637%2C1733828481207.meta:.meta(num 1733828482099)
2024-12-10T11:01:23,450 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,450 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,450 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,451 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,451 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741833_1009 (size=93)
2024-12-10T11:01:23,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741833_1009 (size=93)
2024-12-10T11:01:23,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741833_1009 (size=93)
2024-12-10T11:01:23,457 DEBUG [RS:2;944a6b9062fa:39637 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/oldWALs
2024-12-10T11:01:23,457 INFO [RS:2;944a6b9062fa:39637 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 944a6b9062fa%2C39637%2C1733828481207:(num 1733828481759)
2024-12-10T11:01:23,457 DEBUG [RS:2;944a6b9062fa:39637 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T11:01:23,457 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.LeaseManager(133): Closed leases
2024-12-10T11:01:23,457 INFO [RS:2;944a6b9062fa:39637 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-10T11:01:23,457 INFO [RS:2;944a6b9062fa:39637 {}] hbase.ChoreService(370): Chore service for: regionserver/944a6b9062fa:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-10T11:01:23,458 INFO [RS:2;944a6b9062fa:39637 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-10T11:01:23,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-10T11:01:23,458 INFO [regionserver/944a6b9062fa:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-10T11:01:23,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/944a6b9062fa,36563,1733828481159
2024-12-10T11:01:23,458 INFO [RS:2;944a6b9062fa:39637 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39637
2024-12-10T11:01:23,458 INFO [RS:1;944a6b9062fa:36563 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-10T11:01:23,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/944a6b9062fa,39637,1733828481207
2024-12-10T11:01:23,460 INFO [RS:2;944a6b9062fa:39637 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-10T11:01:23,461 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [944a6b9062fa,39637,1733828481207]
2024-12-10T11:01:23,462 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/944a6b9062fa,39637,1733828481207 already deleted, retry=false
2024-12-10T11:01:23,462 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 944a6b9062fa,39637,1733828481207 expired; onlineServers=1
2024-12-10T11:01:23,462 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [944a6b9062fa,36563,1733828481159]
2024-12-10T11:01:23,464 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/944a6b9062fa,36563,1733828481159 already deleted, retry=false
2024-12-10T11:01:23,464 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 944a6b9062fa,36563,1733828481159 expired; onlineServers=0
2024-12-10T11:01:23,464 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '944a6b9062fa,35283,1733828481062' *****
2024-12-10T11:01:23,464 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-10T11:01:23,464 INFO [M:0;944a6b9062fa:35283 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-10T11:01:23,464 INFO [M:0;944a6b9062fa:35283 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-10T11:01:23,464 DEBUG [M:0;944a6b9062fa:35283 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-10T11:01:23,464 DEBUG [M:0;944a6b9062fa:35283 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-10T11:01:23,464 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-10T11:01:23,464 DEBUG [master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.small.0-1733828481471 {}] cleaner.HFileCleaner(306): Exit Thread[master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.small.0-1733828481471,5,FailOnTimeoutGroup]
2024-12-10T11:01:23,464 DEBUG [master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.large.0-1733828481468 {}] cleaner.HFileCleaner(306): Exit Thread[master/944a6b9062fa:0:becomeActiveMaster-HFileCleaner.large.0-1733828481468,5,FailOnTimeoutGroup]
2024-12-10T11:01:23,464 INFO [M:0;944a6b9062fa:35283 {}] hbase.ChoreService(370): Chore service for: master/944a6b9062fa:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-10T11:01:23,465 INFO [M:0;944a6b9062fa:35283 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-10T11:01:23,465 DEBUG [M:0;944a6b9062fa:35283 {}] master.HMaster(1795): Stopping service threads
2024-12-10T11:01:23,465 INFO [M:0;944a6b9062fa:35283 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-10T11:01:23,465 INFO [M:0;944a6b9062fa:35283 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-10T11:01:23,465 INFO [M:0;944a6b9062fa:35283 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-10T11:01:23,465 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-10T11:01:23,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-10T11:01:23,466 DEBUG [M:0;944a6b9062fa:35283 {}] zookeeper.ZKUtil(347): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-10T11:01:23,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T11:01:23,466 WARN [M:0;944a6b9062fa:35283 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-10T11:01:23,467 INFO [M:0;944a6b9062fa:35283 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/.lastflushedseqids
2024-12-10T11:01:23,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741843_1019 (size=127)
2024-12-10T11:01:23,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741843_1019 (size=127)
2024-12-10T11:01:23,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741843_1019 (size=127)
2024-12-10T11:01:23,482 INFO [M:0;944a6b9062fa:35283 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-10T11:01:23,482 INFO [M:0;944a6b9062fa:35283 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-10T11:01:23,483 DEBUG [M:0;944a6b9062fa:35283 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-10T11:01:23,483 INFO [M:0;944a6b9062fa:35283 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T11:01:23,483 DEBUG [M:0;944a6b9062fa:35283 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T11:01:23,483 DEBUG [M:0;944a6b9062fa:35283 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-10T11:01:23,483 DEBUG [M:0;944a6b9062fa:35283 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T11:01:23,483 INFO [M:0;944a6b9062fa:35283 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB
2024-12-10T11:01:23,508 DEBUG [M:0;944a6b9062fa:35283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc718bdb5d1c4019845c0988d013cb2e is 82, key is hbase:meta,,1/info:regioninfo/1733828482162/Put/seqid=0
2024-12-10T11:01:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741844_1020 (size=5672)
2024-12-10T11:01:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741844_1020 (size=5672)
2024-12-10T11:01:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741844_1020 (size=5672)
2024-12-10T11:01:23,523 INFO [M:0;944a6b9062fa:35283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc718bdb5d1c4019845c0988d013cb2e
2024-12-10T11:01:23,555 DEBUG [M:0;944a6b9062fa:35283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df10c18e70f54397933ea63d9acc4369 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733828482696/Put/seqid=0
2024-12-10T11:01:23,557 WARN [IPC Server handler 3 on default port 34775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-10T11:01:23,557 WARN [IPC Server handler 3 on default port 34775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-10T11:01:23,557 WARN [IPC Server handler 3 on default port 34775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-10T11:01:23,562 INFO [RS:1;944a6b9062fa:36563 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-10T11:01:23,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:01:23,562 INFO [RS:1;944a6b9062fa:36563 {}] regionserver.HRegionServer(1031): Exiting; stopping=944a6b9062fa,36563,1733828481159; zookeeper connection closed.
2024-12-10T11:01:23,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36563-0x10176ce04ba0002, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:01:23,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:01:23,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39637-0x10176ce04ba0003, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:01:23,563 INFO [RS:2;944a6b9062fa:39637 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-10T11:01:23,563 INFO [RS:2;944a6b9062fa:39637 {}] regionserver.HRegionServer(1031): Exiting; stopping=944a6b9062fa,39637,1733828481207; zookeeper connection closed.
2024-12-10T11:01:23,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741845_1021 (size=6439)
2024-12-10T11:01:23,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741845_1021 (size=6439)
2024-12-10T11:01:23,568 INFO [M:0;944a6b9062fa:35283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df10c18e70f54397933ea63d9acc4369
2024-12-10T11:01:23,577 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7dbfb8ba {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7dbfb8ba
2024-12-10T11:01:23,577 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@bad8b89 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@bad8b89
2024-12-10T11:01:23,578 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-12-10T11:01:23,593 DEBUG [M:0;944a6b9062fa:35283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/71f78b3ce4fc4567ade41fe729865236 is 69, key is 944a6b9062fa,36563,1733828481159/rs:state/1733828481570/Put/seqid=0
2024-12-10T11:01:23,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741846_1022 (size=5294)
2024-12-10T11:01:23,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741846_1022 (size=5294)
2024-12-10T11:01:23,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741846_1022 (size=5294)
2024-12-10T11:01:23,617 INFO [M:0;944a6b9062fa:35283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/71f78b3ce4fc4567ade41fe729865236
2024-12-10T11:01:23,634 DEBUG [M:0;944a6b9062fa:35283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc718bdb5d1c4019845c0988d013cb2e as hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dc718bdb5d1c4019845c0988d013cb2e
2024-12-10T11:01:23,648 INFO [M:0;944a6b9062fa:35283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dc718bdb5d1c4019845c0988d013cb2e, entries=8, sequenceid=72, filesize=5.5 K
2024-12-10T11:01:23,650 DEBUG [M:0;944a6b9062fa:35283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df10c18e70f54397933ea63d9acc4369 as hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/df10c18e70f54397933ea63d9acc4369
2024-12-10T11:01:23,660 INFO [M:0;944a6b9062fa:35283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/df10c18e70f54397933ea63d9acc4369, entries=8, sequenceid=72, filesize=6.3 K
2024-12-10T11:01:23,661 DEBUG [M:0;944a6b9062fa:35283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/71f78b3ce4fc4567ade41fe729865236 as hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/71f78b3ce4fc4567ade41fe729865236
2024-12-10T11:01:23,669 INFO [M:0;944a6b9062fa:35283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34775/user/jenkins/test-data/8f2905a0-718f-5830-b114-a35130369686/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/71f78b3ce4fc4567ade41fe729865236, entries=3, sequenceid=72, filesize=5.2 K
2024-12-10T11:01:23,671 INFO [M:0;944a6b9062fa:35283 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=72, compaction requested=false
2024-12-10T11:01:23,677 INFO [M:0;944a6b9062fa:35283 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T11:01:23,678 DEBUG [M:0;944a6b9062fa:35283 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733828483483Disabling compacts and flushes for region at 1733828483483Disabling writes for close at 1733828483483Obtaining lock to block concurrent updates at 1733828483483Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733828483483Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733828483484 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733828483485 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733828483485Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733828483508 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733828483508Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733828483531 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733828483554 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733828483554Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733828483576 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733828483593 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733828483593Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f03599d: reopening flushed file at 1733828483632 (+39 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12565dd8: reopening flushed file at 1733828483648 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65254ba0: reopening flushed file at 1733828483660 (+12 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=72, compaction requested=false at 1733828483671 (+11 ms)Writing region close event to WAL at 1733828483677 (+6 ms)Closed at 1733828483677
2024-12-10T11:01:23,678 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,678 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,678 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,678 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,679 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-10T11:01:23,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46281 is added to blk_1073741830_1006 (size=32674)
2024-12-10T11:01:23,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39269 is added to blk_1073741830_1006 (size=32674)
2024-12-10T11:01:23,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741830_1006 (size=32674)
2024-12-10T11:01:23,684 INFO [M:0;944a6b9062fa:35283 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-10T11:01:23,684 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-10T11:01:23,684 INFO [M:0;944a6b9062fa:35283 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35283
2024-12-10T11:01:23,684 INFO [M:0;944a6b9062fa:35283 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-10T11:01:23,787 INFO [M:0;944a6b9062fa:35283 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-10T11:01:23,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:01:23,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35283-0x10176ce04ba0000, quorum=127.0.0.1:56759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:01:23,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ff8a873{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T11:01:23,790 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ec581d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T11:01:23,790 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T11:01:23,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@328032c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T11:01:23,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e56045e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.log.dir/,STOPPED}
2024-12-10T11:01:23,791 WARN [BP-527819418-172.17.0.2-1733828479876 heartbeating to localhost/127.0.0.1:34775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T11:01:23,791 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T11:01:23,792 WARN [BP-527819418-172.17.0.2-1733828479876 heartbeating to localhost/127.0.0.1:34775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-527819418-172.17.0.2-1733828479876 (Datanode Uuid 916dd368-7964-443b-8ee1-f72da2d6f5e5) service to localhost/127.0.0.1:34775
2024-12-10T11:01:23,792 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T11:01:23,792 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data5/current/BP-527819418-172.17.0.2-1733828479876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:01:23,792 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data6/current/BP-527819418-172.17.0.2-1733828479876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:01:23,793 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T11:01:23,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25106b03{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T11:01:23,795 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ae51624{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T11:01:23,795 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T11:01:23,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@564d8641{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T11:01:23,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55fefad1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.log.dir/,STOPPED}
2024-12-10T11:01:23,797 WARN [BP-527819418-172.17.0.2-1733828479876 heartbeating to localhost/127.0.0.1:34775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T11:01:23,797 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T11:01:23,797 WARN [BP-527819418-172.17.0.2-1733828479876 heartbeating to localhost/127.0.0.1:34775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-527819418-172.17.0.2-1733828479876 (Datanode Uuid 2a03b13b-bfdc-4c6f-8a9c-914822753a8c) service to localhost/127.0.0.1:34775
2024-12-10T11:01:23,797 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T11:01:23,798 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data3/current/BP-527819418-172.17.0.2-1733828479876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:01:23,798 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data4/current/BP-527819418-172.17.0.2-1733828479876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:01:23,798 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T11:01:23,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@760f4a1c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T11:01:23,801 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e494f88{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T11:01:23,801 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T11:01:23,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d1c78c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T11:01:23,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1841cfc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.log.dir/,STOPPED}
2024-12-10T11:01:23,802 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T11:01:23,802 WARN [BP-527819418-172.17.0.2-1733828479876 heartbeating to localhost/127.0.0.1:34775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T11:01:23,802 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T11:01:23,802 WARN [BP-527819418-172.17.0.2-1733828479876 heartbeating to localhost/127.0.0.1:34775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-527819418-172.17.0.2-1733828479876 (Datanode Uuid b2e29257-bd11-4237-b1d9-99f8f9926cb6) service to localhost/127.0.0.1:34775
2024-12-10T11:01:23,803 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data1/current/BP-527819418-172.17.0.2-1733828479876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:01:23,803 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/cluster_e4d66540-2b20-9d9c-e821-ab43ce280b35/data/data2/current/BP-527819418-172.17.0.2-1733828479876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:01:23,803 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T11:01:23,809 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a4689e1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T11:01:23,809 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@53c85678{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T11:01:23,810 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T11:01:23,810 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18ee857{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T11:01:23,810 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2baf02db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce566f6a-d1aa-079d-81ea-3bf5ce5f3d9f/hadoop.log.dir/,STOPPED}
2024-12-10T11:01:23,817 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-10T11:01:23,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-10T11:01:23,849 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=145 (was 85) - Thread LEAK? -, OpenFileDescriptor=518 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=957 (was 989), ProcessCount=11 (was 11), AvailableMemoryMB=4257 (was 4461)