2024-12-08 20:46:10,919 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-08 20:46:10,929 main DEBUG Took 0.009000 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-08 20:46:10,930 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-08 20:46:10,930 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-08 20:46:10,931 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-08 20:46:10,932 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,941 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-08 20:46:10,959 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,961 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,962 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,962 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,963 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,963 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,964 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,965 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,965 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,966 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,967 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,967 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,968 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,969 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,969 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,970 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,970 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,971 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,971 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,972 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,972 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,973 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,973 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,974 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 20:46:10,974 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,975 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-08 20:46:10,976 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 20:46:10,978 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-08 20:46:10,980 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-08 20:46:10,981 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
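The builder calls above reflect the per-logger levels declared in the test's log4j2.properties: DEBUG for org.apache.hadoop.hbase, ERROR for org.apache.zookeeper, and a root logger at INFO wired to the Console appender. A minimal sketch, not part of the test run (the class name is illustrative), of checking those effective levels through the Log4j 2 API:

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Illustrative check that the effective levels match the builder output above:
// DEBUG for org.apache.hadoop.hbase, ERROR for org.apache.zookeeper, INFO on root.
public class LogLevelCheck {
    public static void main(String[] args) {
        Logger hbase = LogManager.getLogger("org.apache.hadoop.hbase");
        Logger zk = LogManager.getLogger("org.apache.zookeeper");
        Logger root = LogManager.getRootLogger();

        System.out.println("hbase DEBUG enabled:    " + hbase.isDebugEnabled());    // expect true
        System.out.println("zookeeper WARN enabled: " + zk.isEnabled(Level.WARN));  // expect false (ERROR threshold)
        System.out.println("root INFO enabled:      " + root.isInfoEnabled());      // expect true
        System.out.println("root DEBUG enabled:     " + root.isDebugEnabled());     // expect false
    }
}
```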
2024-12-08 20:46:10,982 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-08 20:46:10,983 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-08 20:46:10,992 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-08 20:46:10,996 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-08 20:46:10,998 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-08 20:46:10,998 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-08 20:46:10,999 main DEBUG createAppenders(={Console}) 2024-12-08 20:46:11,000 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-08 20:46:11,000 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-08 20:46:11,000 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-08 20:46:11,001 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-08 20:46:11,001 main DEBUG OutputStream closed 2024-12-08 20:46:11,002 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-08 20:46:11,002 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-08 20:46:11,002 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-08 20:46:11,072 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-08 20:46:11,075 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-08 20:46:11,076 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-08 20:46:11,077 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-08 20:46:11,078 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-08 20:46:11,078 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-08 20:46:11,079 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-08 20:46:11,079 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-08 20:46:11,079 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-08 20:46:11,080 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-08 20:46:11,080 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-08 20:46:11,080 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-08 20:46:11,081 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-08 20:46:11,081 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-08 20:46:11,081 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-08 20:46:11,082 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-08 20:46:11,082 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-08 20:46:11,083 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-08 20:46:11,085 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-08 20:46:11,085 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-08 20:46:11,085 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-08 20:46:11,086 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-08T20:46:11,103 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-08 20:46:11,105 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-08 20:46:11,106 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
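The "Registering MBean org.apache.logging.log4j2:type=..." entries above show Log4j 2 publishing its LoggerContext, StatusLogger, ContextSelector, loggers, and appenders over JMX. A minimal sketch, assuming JMX registration has not been disabled in the JVM under test (class name illustrative), of listing those beans from within the same process:

```java
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Lists the Log4j 2 MBeans referred to by the "Registering MBean" entries above.
public class ListLog4jMBeans {
    public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Matches the context, StatusLogger, ContextSelector, Loggers and Appenders beans.
        ObjectName pattern = new ObjectName("org.apache.logging.log4j2:*");
        for (ObjectName name : mbs.queryNames(pattern, null)) {
            System.out.println(name); // e.g. ...,component=Appenders,name=Console
        }
    }
}
```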
2024-12-08T20:46:11,324 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0 2024-12-08T20:46:11,351 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7, deleteOnExit=true 2024-12-08T20:46:11,352 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/test.cache.data in system properties and HBase conf 2024-12-08T20:46:11,352 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T20:46:11,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir in system properties and HBase conf 2024-12-08T20:46:11,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T20:46:11,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T20:46:11,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T20:46:11,462 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-08T20:46:11,542 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T20:46:11,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T20:46:11,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T20:46:11,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T20:46:11,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T20:46:11,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T20:46:11,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T20:46:11,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T20:46:11,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T20:46:11,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T20:46:11,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/nfs.dump.dir in system properties and HBase conf 2024-12-08T20:46:11,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/java.io.tmpdir in system properties and HBase conf 2024-12-08T20:46:11,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T20:46:11,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T20:46:11,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T20:46:12,452 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-08T20:46:12,516 INFO [Time-limited test {}] log.Log(170): Logging initialized @2169ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-08T20:46:12,578 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:12,633 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T20:46:12,652 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T20:46:12,652 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T20:46:12,653 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T20:46:12,664 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:12,666 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir/,AVAILABLE} 2024-12-08T20:46:12,667 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T20:46:12,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/java.io.tmpdir/jetty-localhost-44459-hadoop-hdfs-3_4_1-tests_jar-_-any-1062739206107607450/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T20:46:12,850 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:44459} 2024-12-08T20:46:12,850 INFO [Time-limited test {}] server.Server(415): Started @2504ms 2024-12-08T20:46:13,341 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:13,347 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T20:46:13,348 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T20:46:13,349 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T20:46:13,349 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T20:46:13,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir/,AVAILABLE} 2024-12-08T20:46:13,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T20:46:13,446 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/java.io.tmpdir/jetty-localhost-35965-hadoop-hdfs-3_4_1-tests_jar-_-any-14111004799334880133/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:13,447 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:35965} 2024-12-08T20:46:13,447 INFO [Time-limited test {}] server.Server(415): Started @3100ms 2024-12-08T20:46:13,501 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T20:46:13,602 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:13,606 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T20:46:13,610 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T20:46:13,610 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T20:46:13,610 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T20:46:13,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir/,AVAILABLE} 2024-12-08T20:46:13,612 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T20:46:13,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/java.io.tmpdir/jetty-localhost-43913-hadoop-hdfs-3_4_1-tests_jar-_-any-2372508947092487043/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:13,716 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:43913} 2024-12-08T20:46:13,716 INFO [Time-limited test {}] server.Server(415): Started @3370ms 2024-12-08T20:46:13,718 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T20:46:13,747 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:13,751 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T20:46:13,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T20:46:13,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T20:46:13,754 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T20:46:13,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir/,AVAILABLE} 2024-12-08T20:46:13,755 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T20:46:13,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/java.io.tmpdir/jetty-localhost-33631-hadoop-hdfs-3_4_1-tests_jar-_-any-5764547867300211320/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:13,855 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:33631} 2024-12-08T20:46:13,855 INFO [Time-limited test {}] server.Server(415): Started @3509ms 2024-12-08T20:46:13,857 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
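Each embedded Jetty server above warns "Unable to initialize FileSignerSecretProvider" because /home/jenkins/hadoop-http-auth-signature-secret is not readable, so the HTTP authentication filter falls back to random signing secrets. A minimal sketch, assuming the stock hadoop.http.authentication.signature.secret.file property (the temp-file path and class name are illustrative, not taken from this test), of pointing the filter at a readable secret before the mini cluster's HTTP servers come up:

```java
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.hadoop.conf.Configuration;

// Provides a readable signature secret so the AuthenticationFilter warning above goes away.
public class HttpAuthSecretSetup {
    public static void main(String[] args) throws Exception {
        Path secret = Files.createTempFile("http-auth-signature-secret", "");
        Files.writeString(secret, "test-only-secret");

        Configuration conf = new Configuration();
        conf.set("hadoop.http.authentication.signature.secret.file",
                 secret.toAbsolutePath().toString());
        // Hand this Configuration to the mini DFS / HBase test cluster setup.
    }
}
```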
2024-12-08T20:46:14,749 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data4/current/BP-1275018439-172.17.0.2-1733690772010/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:14,749 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data3/current/BP-1275018439-172.17.0.2-1733690772010/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:14,749 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data1/current/BP-1275018439-172.17.0.2-1733690772010/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:14,749 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data2/current/BP-1275018439-172.17.0.2-1733690772010/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:14,769 WARN [Thread-135 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data5/current/BP-1275018439-172.17.0.2-1733690772010/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:14,772 WARN [Thread-137 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data6/current/BP-1275018439-172.17.0.2-1733690772010/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:14,783 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T20:46:14,784 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T20:46:14,799 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T20:46:14,832 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64e03df56cbd1dde with lease ID 0xdbfe4058099790d4: Processing first storage report for DS-b4220310-ad5a-47a7-befb-3a3f401363aa from datanode DatanodeRegistration(127.0.0.1:44679, datanodeUuid=d1ee03ba-1f3d-4536-afff-e562885d5ca5, infoPort=43211, infoSecurePort=0, ipcPort=41701, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010) 2024-12-08T20:46:14,833 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64e03df56cbd1dde with lease ID 0xdbfe4058099790d4: from storage DS-b4220310-ad5a-47a7-befb-3a3f401363aa node DatanodeRegistration(127.0.0.1:44679, datanodeUuid=d1ee03ba-1f3d-4536-afff-e562885d5ca5, infoPort=43211, infoSecurePort=0, ipcPort=41701, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-08T20:46:14,833 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca1ba14036750b0 with lease ID 0xdbfe4058099790d3: Processing first storage report for DS-d50db4ae-edca-4847-b168-60d42b7f2714 from datanode DatanodeRegistration(127.0.0.1:39933, datanodeUuid=c41ca5ac-58d8-4577-9448-6d6184fbdc01, infoPort=38757, infoSecurePort=0, ipcPort=45871, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010) 2024-12-08T20:46:14,833 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca1ba14036750b0 with lease ID 0xdbfe4058099790d3: from storage DS-d50db4ae-edca-4847-b168-60d42b7f2714 node DatanodeRegistration(127.0.0.1:39933, datanodeUuid=c41ca5ac-58d8-4577-9448-6d6184fbdc01, infoPort=38757, infoSecurePort=0, ipcPort=45871, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T20:46:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf385343a94b141a0 with lease ID 0xdbfe4058099790d5: Processing first storage report for DS-008decea-2955-49eb-a282-8407e50725e2 from datanode DatanodeRegistration(127.0.0.1:39109, datanodeUuid=ecdacbb5-e4cb-4432-a7b7-66590527d01f, infoPort=39119, infoSecurePort=0, ipcPort=42531, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010) 2024-12-08T20:46:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf385343a94b141a0 with lease ID 0xdbfe4058099790d5: from storage DS-008decea-2955-49eb-a282-8407e50725e2 node DatanodeRegistration(127.0.0.1:39109, datanodeUuid=ecdacbb5-e4cb-4432-a7b7-66590527d01f, infoPort=39119, infoSecurePort=0, ipcPort=42531, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T20:46:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64e03df56cbd1dde with lease ID 0xdbfe4058099790d4: Processing first storage report for DS-1de64505-2432-446e-ad84-65cb7dffcb0d from datanode DatanodeRegistration(127.0.0.1:44679, datanodeUuid=d1ee03ba-1f3d-4536-afff-e562885d5ca5, infoPort=43211, infoSecurePort=0, ipcPort=41701, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010) 2024-12-08T20:46:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x64e03df56cbd1dde with lease ID 0xdbfe4058099790d4: from storage DS-1de64505-2432-446e-ad84-65cb7dffcb0d node DatanodeRegistration(127.0.0.1:44679, datanodeUuid=d1ee03ba-1f3d-4536-afff-e562885d5ca5, infoPort=43211, infoSecurePort=0, ipcPort=41701, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T20:46:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca1ba14036750b0 with lease ID 0xdbfe4058099790d3: Processing first storage report for DS-2fcf6104-c9dd-44e1-b484-935fbd92df90 from datanode DatanodeRegistration(127.0.0.1:39933, datanodeUuid=c41ca5ac-58d8-4577-9448-6d6184fbdc01, infoPort=38757, infoSecurePort=0, ipcPort=45871, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010) 2024-12-08T20:46:14,835 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca1ba14036750b0 with lease ID 0xdbfe4058099790d3: from storage DS-2fcf6104-c9dd-44e1-b484-935fbd92df90 node DatanodeRegistration(127.0.0.1:39933, datanodeUuid=c41ca5ac-58d8-4577-9448-6d6184fbdc01, infoPort=38757, infoSecurePort=0, ipcPort=45871, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T20:46:14,835 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf385343a94b141a0 with lease ID 0xdbfe4058099790d5: Processing first storage report for DS-55a980e7-63e3-480d-8bb5-43cdd93f968b from datanode DatanodeRegistration(127.0.0.1:39109, datanodeUuid=ecdacbb5-e4cb-4432-a7b7-66590527d01f, infoPort=39119, infoSecurePort=0, ipcPort=42531, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010) 2024-12-08T20:46:14,835 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf385343a94b141a0 with lease ID 0xdbfe4058099790d5: from storage DS-55a980e7-63e3-480d-8bb5-43cdd93f968b node DatanodeRegistration(127.0.0.1:39109, datanodeUuid=ecdacbb5-e4cb-4432-a7b7-66590527d01f, infoPort=39119, infoSecurePort=0, ipcPort=42531, storageInfo=lv=-57;cid=testClusterID;nsid=1924128084;c=1733690772010), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T20:46:14,836 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0 2024-12-08T20:46:14,898 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable
2024-12-08T20:46:14,943 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=160, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=185, ProcessCount=11, AvailableMemoryMB=17194
2024-12-08T20:46:14,944 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-08T20:46:14,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-08T20:46:15,049 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/zookeeper_0, clientPort=60655, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-08T20:46:15,058 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60655
2024-12-08T20:46:15,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T20:46:15,070 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T20:46:15,148 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T20:46:15,148 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T20:46:15,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:44778 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:44679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44778 dst: /127.0.0.1:44679
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T20:46:15,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-08T20:46:15,615 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-08T20:46:15,628 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc with version=8
2024-12-08T20:46:15,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/hbase-staging
2024-12-08T20:46:15,719 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-08T20:46:15,951 INFO [Time-limited test {}] client.ConnectionUtils(128): master/229bab1f9d30:0 server-side Connection retries=45
2024-12-08T20:46:15,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T20:46:15,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-08T20:46:15,965 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-08T20:46:15,965 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T20:46:15,965 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-08T20:46:16,081 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-08T20:46:16,132 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-08T20:46:16,139 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-08T20:46:16,142 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T20:46:16,163 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19821 (auto-detected) 2024-12-08T20:46:16,164 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-08T20:46:16,179 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39487 2024-12-08T20:46:16,196 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39487 connecting to ZooKeeper ensemble=127.0.0.1:60655 2024-12-08T20:46:16,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:394870x0, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T20:46:16,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39487-0x100073b94950000 connected 2024-12-08T20:46:16,364 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:16,366 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:16,375 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:16,379 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc, hbase.cluster.distributed=false 2024-12-08T20:46:16,399 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T20:46:16,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39487 2024-12-08T20:46:16,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39487 2024-12-08T20:46:16,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39487 2024-12-08T20:46:16,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39487 2024-12-08T20:46:16,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39487 2024-12-08T20:46:16,497 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T20:46:16,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:16,499 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:16,499 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T20:46:16,499 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:16,499 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T20:46:16,501 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T20:46:16,504 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T20:46:16,505 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35231 2024-12-08T20:46:16,507 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35231 connecting to ZooKeeper ensemble=127.0.0.1:60655 2024-12-08T20:46:16,508 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:16,512 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:16,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:352310x0, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T20:46:16,538 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35231-0x100073b94950001 connected 2024-12-08T20:46:16,538 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:16,542 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T20:46:16,552 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T20:46:16,555 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T20:46:16,562 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T20:46:16,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35231 2024-12-08T20:46:16,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=35231 2024-12-08T20:46:16,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35231 2024-12-08T20:46:16,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35231 2024-12-08T20:46:16,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35231 2024-12-08T20:46:16,579 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T20:46:16,579 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:16,579 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:16,580 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T20:46:16,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:16,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T20:46:16,580 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T20:46:16,581 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T20:46:16,582 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40021 2024-12-08T20:46:16,583 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40021 connecting to ZooKeeper ensemble=127.0.0.1:60655 2024-12-08T20:46:16,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:16,589 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:16,612 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:400210x0, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T20:46:16,613 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40021-0x100073b94950002 connected 2024-12-08T20:46:16,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:16,614 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-08T20:46:16,615 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T20:46:16,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T20:46:16,618 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T20:46:16,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40021 2024-12-08T20:46:16,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40021 2024-12-08T20:46:16,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40021 2024-12-08T20:46:16,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40021 2024-12-08T20:46:16,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40021 2024-12-08T20:46:16,639 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T20:46:16,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:16,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:16,639 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T20:46:16,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:16,640 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T20:46:16,640 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T20:46:16,640 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T20:46:16,641 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35153 2024-12-08T20:46:16,642 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35153 connecting to ZooKeeper ensemble=127.0.0.1:60655 2024-12-08T20:46:16,643 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:16,645 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:16,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:351530x0, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T20:46:16,654 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:351530x0, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:16,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35153-0x100073b94950003 connected 2024-12-08T20:46:16,655 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T20:46:16,656 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T20:46:16,657 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T20:46:16,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T20:46:16,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35153 2024-12-08T20:46:16,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35153 2024-12-08T20:46:16,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35153 2024-12-08T20:46:16,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35153 2024-12-08T20:46:16,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35153 2024-12-08T20:46:16,677 DEBUG [M:0;229bab1f9d30:39487 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;229bab1f9d30:39487 2024-12-08T20:46:16,678 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/229bab1f9d30,39487,1733690775793 2024-12-08T20:46:16,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:16,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:16,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
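The handler and call-queue figures repeated in the RpcExecutor lines above (handlerCount, numCallQueues, maxQueueLength=30) are driven by the region server's RPC scheduler configuration. The Java sketch below is a minimal, hypothetical illustration of reading the usual knobs from an HBaseConfiguration; the property names are the commonly documented ones and the fallback defaults passed to the getters are assumptions, not values taken from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Minimal sketch: inspect the RPC scheduler knobs behind the
    // "Instantiated default.FPBQ.Fifo ... handlerCount=..." lines above.
    public final class RpcQueueConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Total RPC handler threads per region server (assumed fallback: 30).
            int handlers = conf.getInt("hbase.regionserver.handler.count", 30);
            // Upper bound on queued calls per call queue (assumed fallback: 10 per handler).
            int maxQueueLength = conf.getInt("hbase.ipc.server.max.callqueue.length", 10 * handlers);
            // Share of priority handlers reserved for reads; 0 disables the read/write split.
            float readRatio = conf.getFloat("hbase.ipc.server.callqueue.read.ratio", 0f);
            System.out.printf("handlers=%d maxQueueLength=%d readRatio=%.2f%n",
                handlers, maxQueueLength, readRatio);
        }
    }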
2024-12-08T20:46:16,687 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:16,690 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/229bab1f9d30,39487,1733690775793 2024-12-08T20:46:16,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T20:46:16,712 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T20:46:16,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:16,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T20:46:16,712 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:16,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:16,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:16,714 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T20:46:16,715 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/229bab1f9d30,39487,1733690775793 from backup master directory 2024-12-08T20:46:16,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/229bab1f9d30,39487,1733690775793 2024-12-08T20:46:16,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:16,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-08T20:46:16,720 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:16,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:16,721 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T20:46:16,721 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=229bab1f9d30,39487,1733690775793 2024-12-08T20:46:16,723 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-08T20:46:16,724 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-08T20:46:16,783 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/hbase.id] with ID: cdb35848-c144-4b72-a6ae-dfd079c9c496 2024-12-08T20:46:16,783 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/.tmp/hbase.id 2024-12-08T20:46:16,790 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:16,790 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:16,793 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:33942 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:39933:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33942 dst: /127.0.0.1:39933 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:16,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-08T20:46:16,799 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T20:46:16,799 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/.tmp/hbase.id]:[hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/hbase.id] 2024-12-08T20:46:16,840 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:16,845 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T20:46:16,862 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-08T20:46:16,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:16,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:16,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:16,887 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:16,904 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:16,904 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:16,907 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:44806 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:44679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44806 dst: /127.0.0.1:44679 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:16,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-08T20:46:16,914 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
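The "Cannot allocate parity block" warnings and the "Block group <1> failed to write 2 blocks" message above are what RS-3-2-1024k striping produces when the mini cluster has fewer datanodes than the policy needs (3 data plus 2 parity blocks per group); the log itself points at 'hdfs ec -verifyClusterSetup' as the authoritative check. As a rough programmatic counterpart, this sketch asks the public DistributedFileSystem API which erasure coding policy applies to a directory. The namenode URI is the one reported in the log, the directory path is hypothetical, and a null return is taken to mean plain replication.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    // Minimal sketch: report which EC policy (if any) applies to a directory,
    // so an RS-3-2-1024k policy on a three-datanode mini cluster is easy to spot.
    public final class EcPolicyCheckSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Namenode address as it appears in the log; adjust for your cluster.
            URI namenode = URI.create("hdfs://localhost:43629");
            try (FileSystem fs = FileSystem.get(namenode, conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                Path dir = new Path("/user/jenkins/test-data"); // hypothetical path
                ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
                if (policy == null) {
                    System.out.println(dir + " uses plain replication");
                } else {
                    // RS-3-2 needs at least five datanodes to place a full block group.
                    System.out.println(dir + " uses EC policy " + policy.getName());
                }
            }
        }
    }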
2024-12-08T20:46:16,931 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T20:46:16,933 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T20:46:16,937 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T20:46:16,961 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:16,961 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:16,964 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:44820 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:44679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44820 dst: /127.0.0.1:44679 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:16,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-08T20:46:16,970 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T20:46:16,985 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store 2024-12-08T20:46:16,999 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:16,999 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:17,002 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:33256 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33256 dst: /127.0.0.1:39109 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:17,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-08T20:46:17,007 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T20:46:17,010 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-08T20:46:17,013 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:17,014 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T20:46:17,014 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:17,015 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:17,016 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-08T20:46:17,016 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:17,016 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:17,018 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733690777014Disabling compacts and flushes for region at 1733690777014Disabling writes for close at 1733690777016 (+2 ms)Writing region close event to WAL at 1733690777016Closed at 1733690777016 2024-12-08T20:46:17,020 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/.initializing 2024-12-08T20:46:17,020 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/WALs/229bab1f9d30,39487,1733690775793 2024-12-08T20:46:17,030 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T20:46:17,045 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C39487%2C1733690775793, suffix=, logDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/WALs/229bab1f9d30,39487,1733690775793, archiveDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/oldWALs, maxLogs=10 2024-12-08T20:46:17,081 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/WALs/229bab1f9d30,39487,1733690775793/229bab1f9d30%2C39487%2C1733690775793.1733690777051, exclude list is [], retry=0 2024-12-08T20:46:17,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:17,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39933,DS-d50db4ae-edca-4847-b168-60d42b7f2714,DISK] 2024-12-08T20:46:17,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39109,DS-008decea-2955-49eb-a282-8407e50725e2,DISK] 2024-12-08T20:46:17,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44679,DS-b4220310-ad5a-47a7-befb-3a3f401363aa,DISK] 2024-12-08T20:46:17,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-08T20:46:17,137 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/WALs/229bab1f9d30,39487,1733690775793/229bab1f9d30%2C39487%2C1733690775793.1733690777051 2024-12-08T20:46:17,138 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39119:39119),(127.0.0.1/127.0.0.1:38757:38757),(127.0.0.1/127.0.0.1:43211:43211)] 2024-12-08T20:46:17,138 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T20:46:17,139 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:17,141 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,142 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T20:46:17,199 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:17,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:17,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T20:46:17,205 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:17,206 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T20:46:17,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T20:46:17,209 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:17,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T20:46:17,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T20:46:17,213 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:17,214 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T20:46:17,214 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,217 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,218 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,223 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,223 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,227 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T20:46:17,230 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:17,235 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T20:46:17,237 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69856549, jitterRate=0.0409436970949173}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T20:46:17,244 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733690777154Initializing all the Stores at 1733690777156 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690777156Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690777157 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690777157Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690777157Cleaning up temporary data from old regions at 1733690777223 (+66 ms)Region opened successfully at 1733690777244 (+21 ms) 2024-12-08T20:46:17,245 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T20:46:17,277 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@637b573a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T20:46:17,303 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T20:46:17,313 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T20:46:17,313 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T20:46:17,315 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T20:46:17,316 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T20:46:17,321 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-08T20:46:17,322 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T20:46:17,344 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T20:46:17,351 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T20:46:17,404 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T20:46:17,407 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T20:46:17,410 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T20:46:17,420 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T20:46:17,425 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T20:46:17,431 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T20:46:17,436 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T20:46:17,438 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T20:46:17,445 DEBUG [master/229bab1f9d30:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T20:46:17,463 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T20:46:17,470 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T20:46:17,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:17,478 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:17,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:17,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:17,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,479 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,482 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=229bab1f9d30,39487,1733690775793, sessionid=0x100073b94950000, setting cluster-up flag (Was=false) 2024-12-08T20:46:17,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
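The ZKUtil and ZKWatcher lines above show each process setting watchers on znodes such as /hbase/running before they exist and then reacting to the NodeCreated event once the active master creates them. The sketch below reproduces that pattern with the plain ZooKeeper client API against the quorum address reported in the log; it illustrates the watch-then-wait idiom, not the code HBase itself runs.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    // Minimal sketch: block until /hbase/running is created, mirroring the
    // "Set watcher on znode that does not yet exist" lines above.
    public final class RunningZNodeWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch created = new CountDownLatch(1);
            Watcher watcher = (WatchedEvent event) -> {
                if (event.getType() == Watcher.Event.EventType.NodeCreated
                    && "/hbase/running".equals(event.getPath())) {
                    created.countDown();
                }
            };
            // Quorum address as reported in the log.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60655", 30_000, watcher);
            try {
                Stat stat = zk.exists("/hbase/running", true); // true = leave a watch behind
                if (stat == null) {
                    created.await(); // released by the NodeCreated event
                }
                System.out.println("cluster is up: /hbase/running exists");
            } finally {
                zk.close();
            }
        }
    }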
2024-12-08T20:46:17,512 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,537 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T20:46:17,541 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,39487,1733690775793 2024-12-08T20:46:17,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,562 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:17,587 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T20:46:17,589 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,39487,1733690775793 2024-12-08T20:46:17,597 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T20:46:17,664 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(746): ClusterId : cdb35848-c144-4b72-a6ae-dfd079c9c496 2024-12-08T20:46:17,664 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(746): ClusterId : cdb35848-c144-4b72-a6ae-dfd079c9c496 2024-12-08T20:46:17,664 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(746): ClusterId : cdb35848-c144-4b72-a6ae-dfd079c9c496 2024-12-08T20:46:17,667 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T20:46:17,667 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T20:46:17,667 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T20:46:17,666 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T20:46:17,675 INFO 
[master/229bab1f9d30:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T20:46:17,681 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T20:46:17,690 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T20:46:17,690 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T20:46:17,690 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T20:46:17,690 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T20:46:17,690 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T20:46:17,690 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T20:46:17,688 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 229bab1f9d30,39487,1733690775793 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T20:46:17,705 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T20:46:17,705 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T20:46:17,705 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T20:46:17,705 DEBUG [RS:2;229bab1f9d30:35153 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@275f0e2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T20:46:17,705 DEBUG [RS:0;229bab1f9d30:35231 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39fb14a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T20:46:17,705 DEBUG [RS:1;229bab1f9d30:40021 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154da6e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 
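Aside (illustrative only): the AbstractRpcClient records above, like the StochasticLoadBalancer record before them, dump their settings as a flat `key=value, key=value, ...` list. A small generic splitter for that shape is sketched below; the values are a few of the settings copied from the RS records above, no HBase API is involved, and the function name is made up.

```python
def parse_kv_settings(text: str) -> dict:
    """Split a 'k=v, k=v, ...' dump (as printed by AbstractRpcClient above) into a dict.
    Values are kept as strings; no type coercion is attempted."""
    out = {}
    for part in text.split(","):
        if "=" in part:
            key, _, value = part.partition("=")
            out[key.strip()] = value.strip()
    return out

rpc = parse_kv_settings(
    "compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, "
    "readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, "
    "fallbackAllowed=true"
)
print(rpc["connectTO"], rpc["readTO"], rpc["writeTO"])  # 10000 20000 60000
```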
2024-12-08T20:46:17,707 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T20:46:17,707 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T20:46:17,707 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T20:46:17,707 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T20:46:17,708 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/229bab1f9d30:0, corePoolSize=10, maxPoolSize=10 2024-12-08T20:46:17,708 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,708 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2 2024-12-08T20:46:17,708 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,716 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T20:46:17,717 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T20:46:17,720 DEBUG [RS:2;229bab1f9d30:35153 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;229bab1f9d30:35153 2024-12-08T20:46:17,723 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733690807723 2024-12-08T20:46:17,724 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T20:46:17,724 DEBUG [RS:0;229bab1f9d30:35231 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;229bab1f9d30:35231 2024-12-08T20:46:17,724 DEBUG [RS:1;229bab1f9d30:40021 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;229bab1f9d30:40021 2024-12-08T20:46:17,724 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T20:46:17,724 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T20:46:17,724 DEBUG [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(832): About to register with Master. 
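Aside (illustrative only): the ExecutorService records above announce every master-side pool with its `corePoolSize` and `maxPoolSize`. A rough sketch that gathers those numbers, for example to total the configured worker threads; the two sample records are abbreviated copies of lines from this log, and the function name is invented.

```python
import re

EXECUTOR = re.compile(
    r"Starting executor service name=(?P<name>\S+), "
    r"corePoolSize=(?P<core>\d+), maxPoolSize=(?P<max>\d+)"
)

def pool_sizes(lines):
    """Collect {executor name: (corePoolSize, maxPoolSize)} from ExecutorService records."""
    pools = {}
    for line in lines:
        m = EXECUTOR.search(line)
        if m:
            pools[m.group("name")] = (int(m.group("core")), int(m.group("max")))
    return pools

sample = [
    "executor.ExecutorService(95): Starting executor service "
    "name=MASTER_OPEN_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5",
    "executor.ExecutorService(95): Starting executor service "
    "name=M_LOG_REPLAY_OPS-master/229bab1f9d30:0, corePoolSize=10, maxPoolSize=10",
]
pools = pool_sizes(sample)
print(sum(mx for _, mx in pools.values()))  # 15 for these two records
```

Applied to the eight master-side pools logged above, the maxPoolSize values add up to 34 threads.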
2024-12-08T20:46:17,724 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T20:46:17,724 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T20:46:17,724 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T20:46:17,724 DEBUG [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T20:46:17,724 DEBUG [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T20:46:17,725 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:17,725 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T20:46:17,725 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T20:46:17,727 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T20:46:17,727 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(2659): reportForDuty to master=229bab1f9d30,39487,1733690775793 with port=35153, startcode=1733690776638 2024-12-08T20:46:17,727 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(2659): reportForDuty to master=229bab1f9d30,39487,1733690775793 with port=40021, startcode=1733690776578 2024-12-08T20:46:17,728 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(2659): reportForDuty to master=229bab1f9d30,39487,1733690775793 with port=35231, startcode=1733690776465 2024-12-08T20:46:17,730 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T20:46:17,731 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T20:46:17,731 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T20:46:17,731 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T20:46:17,733 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:17,733 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:17,732 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,741 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T20:46:17,741 DEBUG [RS:1;229bab1f9d30:40021 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T20:46:17,741 DEBUG [RS:2;229bab1f9d30:35153 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T20:46:17,742 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T20:46:17,742 DEBUG [RS:0;229bab1f9d30:35231 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T20:46:17,742 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T20:46:17,745 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:44834 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:44679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44834 dst: /127.0.0.1:44679 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:17,751 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T20:46:17,751 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T20:46:17,757 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733690777752,5,FailOnTimeoutGroup] 2024-12-08T20:46:17,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-08T20:46:17,759 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T20:46:17,763 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733690777757,5,FailOnTimeoutGroup] 2024-12-08T20:46:17,763 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,763 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
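Aside (not part of the test output): the WARN and ERROR records above look alarming but are consistent with a known mini-cluster limitation. The RS-3-2-1024k erasure-coding policy needs five block targets (three data plus two parity), while only three datanodes appear in this log, so the striped output stream cannot place the parity blocks and warns that the block group failed to write two of them; the datanode's "Premature EOF" on the WRITE_BLOCK operation is likely the other end of the same abandoned pipeline. To triage such records in bulk, a small illustrative sketch that counts WARN/ERROR records per logger, assuming the raw log keeps one record per line; the sample records are copied from the log above.

```python
import re
from collections import Counter

# Rough record pattern: "<timestamp> <LEVEL> [<thread>] <logger>(<line>): <message>".
# Good enough for triage; it is not a full Log4j parser.
RECORD = re.compile(
    r"^(?P<ts>\S+)\s+(?P<level>TRACE|DEBUG|INFO|WARN|ERROR|FATAL)\s+"
    r"\[(?P<thread>.*)\]\s+(?P<logger>[\w.$]+)\(\d+\):"
)

def triage(lines):
    """Count WARN/ERROR records per (level, logger)."""
    counts = Counter()
    for line in lines:
        m = RECORD.match(line)
        if m and m.group("level") in ("WARN", "ERROR"):
            counts[(m.group("level"), m.group("logger"))] += 1
    return counts

sample = [
    "2024-12-08T20:46:17,759 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): "
    "Block group <1> failed to write 2 blocks. It's at high risk of losing data.",
    "2024-12-08T20:46:17,758 INFO [Block report processor {}] "
    "blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 "
    "is added to blk_-9223372036854775712_1013 (size=1321)",
]
print(triage(sample))  # Counter({('WARN', 'hdfs.DFSStripedOutputStream'): 1})
```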
2024-12-08T20:46:17,763 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T20:46:17,764 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc 2024-12-08T20:46:17,765 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,766 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,777 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:17,777 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
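Aside (illustrative only): the hbase:meta descriptor printed above lists each column family as a `{KEY => 'value', ...}` block. A minimal sketch for turning one such block into a dict; the attributes in the sample are a subset of the `info` family shown above, and the helper name is invented.

```python
import re

ATTR = re.compile(r"(\w+) => '([^']*)'")

def parse_family(block: str) -> dict:
    """Turn one {KEY => 'value', ...} column-family block from the descriptor above into a dict."""
    return dict(ATTR.findall(block))

info = parse_family(
    "{NAME => 'info', VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', "
    "BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)'}"
)
print(info["NAME"], info["VERSIONS"], info["BLOCKSIZE"])  # info 3 8192 B (8KB)
```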
2024-12-08T20:46:17,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:33278 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:39109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33278 dst: /127.0.0.1:39109 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:17,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-08T20:46:17,789 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37319, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T20:46:17,789 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50907, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T20:46:17,789 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46569, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T20:46:17,795 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39487 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 229bab1f9d30,40021,1733690776578 2024-12-08T20:46:17,797 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39487 {}] master.ServerManager(517): Registering regionserver=229bab1f9d30,40021,1733690776578 2024-12-08T20:46:17,809 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39487 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 229bab1f9d30,35153,1733690776638 2024-12-08T20:46:17,810 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39487 {}] master.ServerManager(517): Registering regionserver=229bab1f9d30,35153,1733690776638 2024-12-08T20:46:17,814 DEBUG [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc 2024-12-08T20:46:17,814 DEBUG [RS:1;229bab1f9d30:40021 
{}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43629 2024-12-08T20:46:17,814 DEBUG [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T20:46:17,816 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39487 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 229bab1f9d30,35231,1733690776465 2024-12-08T20:46:17,816 DEBUG [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc 2024-12-08T20:46:17,816 DEBUG [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43629 2024-12-08T20:46:17,816 DEBUG [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T20:46:17,817 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39487 {}] master.ServerManager(517): Registering regionserver=229bab1f9d30,35231,1733690776465 2024-12-08T20:46:17,820 DEBUG [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc 2024-12-08T20:46:17,820 DEBUG [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43629 2024-12-08T20:46:17,820 DEBUG [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T20:46:17,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-08T20:46:17,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-08T20:46:17,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T20:46:17,878 DEBUG [RS:1;229bab1f9d30:40021 {}] zookeeper.ZKUtil(111): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/229bab1f9d30,40021,1733690776578 2024-12-08T20:46:17,878 DEBUG [RS:2;229bab1f9d30:35153 {}] zookeeper.ZKUtil(111): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/229bab1f9d30,35153,1733690776638 2024-12-08T20:46:17,879 WARN [RS:2;229bab1f9d30:35153 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T20:46:17,879 WARN [RS:1;229bab1f9d30:40021 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
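Aside (not part of the test output): the master registers each region server under a name of the form `host,port,startcode`, for example `229bab1f9d30,35231,1733690776465` above. A small sketch that splits that form; treating the startcode as epoch milliseconds is an assumption here, but the rendered UTC time lands right next to the surrounding log timestamps, which is why it is a reasonable reading.

```python
from datetime import datetime, timezone

def parse_server_name(name: str):
    """Split an HBase-style server name 'host,port,startcode' into its parts."""
    host, port, startcode = name.split(",")
    return host, int(port), int(startcode)

host, port, startcode = parse_server_name("229bab1f9d30,35231,1733690776465")
# Rendering the startcode as epoch milliseconds gives 2024-12-08 20:46:16 UTC,
# which matches the registration timestamps in the records above.
print(host, port, datetime.fromtimestamp(startcode / 1000, tz=timezone.utc).isoformat())
```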
2024-12-08T20:46:17,879 INFO [RS:1;229bab1f9d30:40021 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T20:46:17,879 INFO [RS:2;229bab1f9d30:35153 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T20:46:17,879 DEBUG [RS:0;229bab1f9d30:35231 {}] zookeeper.ZKUtil(111): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/229bab1f9d30,35231,1733690776465 2024-12-08T20:46:17,879 WARN [RS:0;229bab1f9d30:35231 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T20:46:17,879 INFO [RS:0;229bab1f9d30:35231 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T20:46:17,879 DEBUG [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,40021,1733690776578 2024-12-08T20:46:17,879 DEBUG [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,35153,1733690776638 2024-12-08T20:46:17,879 DEBUG [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,35231,1733690776465 2024-12-08T20:46:17,881 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [229bab1f9d30,35231,1733690776465] 2024-12-08T20:46:17,881 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [229bab1f9d30,40021,1733690776578] 2024-12-08T20:46:17,881 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [229bab1f9d30,35153,1733690776638] 2024-12-08T20:46:17,901 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T20:46:17,901 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T20:46:17,901 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T20:46:17,912 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T20:46:17,912 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T20:46:17,913 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T20:46:17,918 INFO [RS:2;229bab1f9d30:35153 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T20:46:17,918 INFO [RS:0;229bab1f9d30:35231 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 
MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T20:46:17,918 INFO [RS:1;229bab1f9d30:40021 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T20:46:17,918 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,918 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,918 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,919 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T20:46:17,919 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T20:46:17,921 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T20:46:17,925 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T20:46:17,925 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T20:46:17,925 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T20:46:17,926 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,926 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,926 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-08T20:46:17,926 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,926 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,926 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2 2024-12-08T20:46:17,927 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2 2024-12-08T20:46:17,927 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 
2024-12-08T20:46:17,927 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2 2024-12-08T20:46:17,927 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,927 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3 2024-12-08T20:46:17,928 DEBUG [RS:0;229bab1f9d30:35231 {}] 
executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3 2024-12-08T20:46:17,928 DEBUG [RS:1;229bab1f9d30:40021 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3 2024-12-08T20:46:17,928 DEBUG [RS:0;229bab1f9d30:35231 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3 2024-12-08T20:46:17,928 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,928 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:17,929 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3 2024-12-08T20:46:17,929 DEBUG [RS:2;229bab1f9d30:35153 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3 2024-12-08T20:46:17,932 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,35153,1733690776638-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,932 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,933 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-08T20:46:17,933 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,933 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,933 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,933 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,933 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,35231,1733690776465-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T20:46:17,933 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,40021,1733690776578-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T20:46:17,951 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T20:46:17,951 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T20:46:17,951 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T20:46:17,953 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,35231,1733690776465-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,953 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,35153,1733690776638-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,953 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,40021,1733690776578-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,953 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,953 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,954 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.Replication(171): 229bab1f9d30,35231,1733690776465 started 2024-12-08T20:46:17,954 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.Replication(171): 229bab1f9d30,40021,1733690776578 started 2024-12-08T20:46:17,955 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,955 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.Replication(171): 229bab1f9d30,35153,1733690776638 started 2024-12-08T20:46:17,970 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
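Aside (illustrative only): the ChoreService records above report each period in raw milliseconds (except the MobFileCleanerChore entries, which are logged in seconds), which is awkward to eyeball. A short sketch that renders the millisecond periods copied from the records above in readable form; nothing else is assumed.

```python
from datetime import timedelta

# Periods copied from the MILLISECONDS-based ChoreService records above.
chore_periods_ms = {
    "CompactionChecker": 1000,
    "MemstoreFlusherChore": 1000,
    "ExecutorStatusChore": 60000,
    "CompactedHFilesCleaner": 120000,
    "ReplicationSourceStatistics": 300000,
    "nonceCleaner": 360000,
    "BrokenStoreFileCleaner": 21600000,
}

for name, ms in sorted(chore_periods_ms.items(), key=lambda kv: kv[1]):
    print(f"{name:28s} every {timedelta(milliseconds=ms)}")
# e.g. CompactionChecker runs every 0:00:01, BrokenStoreFileCleaner every 6:00:00
```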
2024-12-08T20:46:17,971 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(1482): Serving as 229bab1f9d30,35231,1733690776465, RpcServer on 229bab1f9d30/172.17.0.2:35231, sessionid=0x100073b94950001 2024-12-08T20:46:17,971 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T20:46:17,972 DEBUG [RS:0;229bab1f9d30:35231 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 229bab1f9d30,35231,1733690776465 2024-12-08T20:46:17,972 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,35231,1733690776465' 2024-12-08T20:46:17,972 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T20:46:17,973 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T20:46:17,973 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T20:46:17,973 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T20:46:17,974 DEBUG [RS:0;229bab1f9d30:35231 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 229bab1f9d30,35231,1733690776465 2024-12-08T20:46:17,974 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,35231,1733690776465' 2024-12-08T20:46:17,974 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T20:46:17,974 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T20:46:17,975 DEBUG [RS:0;229bab1f9d30:35231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T20:46:17,975 INFO [RS:0;229bab1f9d30:35231 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T20:46:17,975 INFO [RS:0;229bab1f9d30:35231 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T20:46:17,977 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:17,977 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T20:46:17,977 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1482): Serving as 229bab1f9d30,40021,1733690776578, RpcServer on 229bab1f9d30/172.17.0.2:40021, sessionid=0x100073b94950002 2024-12-08T20:46:17,977 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(1482): Serving as 229bab1f9d30,35153,1733690776638, RpcServer on 229bab1f9d30/172.17.0.2:35153, sessionid=0x100073b94950003 2024-12-08T20:46:17,977 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T20:46:17,977 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T20:46:17,977 DEBUG [RS:1;229bab1f9d30:40021 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 229bab1f9d30,40021,1733690776578 2024-12-08T20:46:17,977 DEBUG [RS:2;229bab1f9d30:35153 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 229bab1f9d30,35153,1733690776638 2024-12-08T20:46:17,977 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,40021,1733690776578' 2024-12-08T20:46:17,977 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,35153,1733690776638' 2024-12-08T20:46:17,977 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T20:46:17,978 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T20:46:17,978 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T20:46:17,978 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T20:46:17,979 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T20:46:17,979 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T20:46:17,979 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T20:46:17,979 DEBUG [RS:1;229bab1f9d30:40021 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 229bab1f9d30,40021,1733690776578 2024-12-08T20:46:17,979 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T20:46:17,979 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,40021,1733690776578' 2024-12-08T20:46:17,979 DEBUG [RS:2;229bab1f9d30:35153 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 229bab1f9d30,35153,1733690776638 2024-12-08T20:46:17,979 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T20:46:17,979 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,35153,1733690776638' 2024-12-08T20:46:17,979 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T20:46:17,979 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T20:46:17,980 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T20:46:17,980 DEBUG [RS:1;229bab1f9d30:40021 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T20:46:17,980 INFO [RS:1;229bab1f9d30:40021 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T20:46:17,980 INFO [RS:1;229bab1f9d30:40021 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T20:46:17,980 DEBUG [RS:2;229bab1f9d30:35153 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T20:46:17,980 INFO [RS:2;229bab1f9d30:35153 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T20:46:17,980 INFO [RS:2;229bab1f9d30:35153 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T20:46:18,086 INFO [RS:1;229bab1f9d30:40021 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T20:46:18,086 INFO [RS:0;229bab1f9d30:35231 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T20:46:18,086 INFO [RS:2;229bab1f9d30:35153 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T20:46:18,090 INFO [RS:2;229bab1f9d30:35153 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C35153%2C1733690776638, suffix=, logDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,35153,1733690776638, archiveDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/oldWALs, maxLogs=32 2024-12-08T20:46:18,090 INFO [RS:0;229bab1f9d30:35231 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C35231%2C1733690776465, suffix=, logDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,35231,1733690776465, archiveDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/oldWALs, maxLogs=32 2024-12-08T20:46:18,090 INFO [RS:1;229bab1f9d30:40021 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C40021%2C1733690776578, suffix=, logDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,40021,1733690776578, archiveDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/oldWALs, maxLogs=32 2024-12-08T20:46:18,105 DEBUG [RS:2;229bab1f9d30:35153 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,35153,1733690776638/229bab1f9d30%2C35153%2C1733690776638.1733690778093, exclude list is [], retry=0 2024-12-08T20:46:18,105 DEBUG [RS:1;229bab1f9d30:40021 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,40021,1733690776578/229bab1f9d30%2C40021%2C1733690776578.1733690778093, exclude 
list is [], retry=0 2024-12-08T20:46:18,107 DEBUG [RS:0;229bab1f9d30:35231 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,35231,1733690776465/229bab1f9d30%2C35231%2C1733690776465.1733690778093, exclude list is [], retry=0 2024-12-08T20:46:18,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39109,DS-008decea-2955-49eb-a282-8407e50725e2,DISK] 2024-12-08T20:46:18,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39933,DS-d50db4ae-edca-4847-b168-60d42b7f2714,DISK] 2024-12-08T20:46:18,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44679,DS-b4220310-ad5a-47a7-befb-3a3f401363aa,DISK] 2024-12-08T20:46:18,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39933,DS-d50db4ae-edca-4847-b168-60d42b7f2714,DISK] 2024-12-08T20:46:18,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44679,DS-b4220310-ad5a-47a7-befb-3a3f401363aa,DISK] 2024-12-08T20:46:18,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39109,DS-008decea-2955-49eb-a282-8407e50725e2,DISK] 2024-12-08T20:46:18,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39933,DS-d50db4ae-edca-4847-b168-60d42b7f2714,DISK] 2024-12-08T20:46:18,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39109,DS-008decea-2955-49eb-a282-8407e50725e2,DISK] 2024-12-08T20:46:18,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44679,DS-b4220310-ad5a-47a7-befb-3a3f401363aa,DISK] 2024-12-08T20:46:18,155 INFO [RS:1;229bab1f9d30:40021 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,40021,1733690776578/229bab1f9d30%2C40021%2C1733690776578.1733690778093 2024-12-08T20:46:18,156 INFO 
[RS:2;229bab1f9d30:35153 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,35153,1733690776638/229bab1f9d30%2C35153%2C1733690776638.1733690778093 2024-12-08T20:46:18,156 INFO [RS:0;229bab1f9d30:35231 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,35231,1733690776465/229bab1f9d30%2C35231%2C1733690776465.1733690778093 2024-12-08T20:46:18,156 DEBUG [RS:1;229bab1f9d30:40021 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39119:39119),(127.0.0.1/127.0.0.1:38757:38757),(127.0.0.1/127.0.0.1:43211:43211)] 2024-12-08T20:46:18,156 DEBUG [RS:2;229bab1f9d30:35153 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38757:38757),(127.0.0.1/127.0.0.1:39119:39119),(127.0.0.1/127.0.0.1:43211:43211)] 2024-12-08T20:46:18,156 DEBUG [RS:0;229bab1f9d30:35231 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39119:39119),(127.0.0.1/127.0.0.1:38757:38757),(127.0.0.1/127.0.0.1:43211:43211)] 2024-12-08T20:46:18,189 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T20:46:18,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:18,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T20:46:18,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T20:46:18,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:18,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:18,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T20:46:18,203 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T20:46:18,203 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:18,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:18,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T20:46:18,207 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T20:46:18,207 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:18,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:18,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T20:46:18,210 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T20:46:18,210 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:18,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:18,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T20:46:18,212 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740 2024-12-08T20:46:18,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740 2024-12-08T20:46:18,215 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T20:46:18,215 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T20:46:18,216 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T20:46:18,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T20:46:18,223 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T20:46:18,224 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62825439, jitterRate=-0.0638280063867569}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T20:46:18,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733690778191Initializing all the Stores at 1733690778194 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690778194Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690778194Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690778194Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690778194Cleaning up temporary data from old regions at 1733690778215 (+21 ms)Region opened successfully at 1733690778226 (+11 ms) 2024-12-08T20:46:18,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T20:46:18,227 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T20:46:18,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T20:46:18,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T20:46:18,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T20:46:18,228 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T20:46:18,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733690778226Disabling compacts and flushes for region at 1733690778226Disabling writes for close at 1733690778227 (+1 ms)Writing region close event to WAL at 1733690778228 (+1 ms)Closed at 1733690778228 2024-12-08T20:46:18,231 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T20:46:18,231 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T20:46:18,238 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T20:46:18,245 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T20:46:18,248 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T20:46:18,401 DEBUG [229bab1f9d30:39487 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-08T20:46:18,410 DEBUG [229bab1f9d30:39487 {}] balancer.BalancerClusterState(204): Hosts are {229bab1f9d30=0} racks are {/default-rack=0} 2024-12-08T20:46:18,415 DEBUG [229bab1f9d30:39487 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-08T20:46:18,415 DEBUG [229bab1f9d30:39487 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-08T20:46:18,416 DEBUG 
[229bab1f9d30:39487 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-08T20:46:18,416 DEBUG [229bab1f9d30:39487 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-08T20:46:18,416 DEBUG [229bab1f9d30:39487 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-08T20:46:18,416 DEBUG [229bab1f9d30:39487 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-08T20:46:18,416 INFO [229bab1f9d30:39487 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-08T20:46:18,416 INFO [229bab1f9d30:39487 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-08T20:46:18,416 INFO [229bab1f9d30:39487 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-08T20:46:18,416 DEBUG [229bab1f9d30:39487 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-08T20:46:18,422 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=229bab1f9d30,40021,1733690776578 2024-12-08T20:46:18,427 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 229bab1f9d30,40021,1733690776578, state=OPENING 2024-12-08T20:46:18,478 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T20:46:18,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:18,487 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:18,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:18,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:18,489 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T20:46:18,489 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T20:46:18,489 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T20:46:18,489 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T20:46:18,491 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T20:46:18,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=229bab1f9d30,40021,1733690776578}] 2024-12-08T20:46:18,672 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T20:46:18,674 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36907, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T20:46:18,685 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T20:46:18,686 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T20:46:18,686 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-08T20:46:18,690 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C40021%2C1733690776578.meta, suffix=.meta, logDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,40021,1733690776578, archiveDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/oldWALs, maxLogs=32 2024-12-08T20:46:18,707 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,40021,1733690776578/229bab1f9d30%2C40021%2C1733690776578.meta.1733690778692.meta, exclude list is [], retry=0 2024-12-08T20:46:18,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39933,DS-d50db4ae-edca-4847-b168-60d42b7f2714,DISK] 2024-12-08T20:46:18,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44679,DS-b4220310-ad5a-47a7-befb-3a3f401363aa,DISK] 2024-12-08T20:46:18,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39109,DS-008decea-2955-49eb-a282-8407e50725e2,DISK] 2024-12-08T20:46:18,715 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/WALs/229bab1f9d30,40021,1733690776578/229bab1f9d30%2C40021%2C1733690776578.meta.1733690778692.meta 2024-12-08T20:46:18,715 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38757:38757),(127.0.0.1/127.0.0.1:43211:43211),(127.0.0.1/127.0.0.1:39119:39119)] 2024-12-08T20:46:18,716 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T20:46:18,718 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T20:46:18,720 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T20:46:18,724 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T20:46:18,746 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T20:46:18,747 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:18,747 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T20:46:18,747 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T20:46:18,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T20:46:18,753 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T20:46:18,753 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:18,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:18,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T20:46:18,756 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T20:46:18,756 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:18,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:18,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T20:46:18,759 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T20:46:18,759 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:18,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:18,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T20:46:18,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T20:46:18,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:18,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:18,763 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T20:46:18,765 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740 2024-12-08T20:46:18,770 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740 2024-12-08T20:46:18,773 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T20:46:18,773 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T20:46:18,774 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
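The store-opening entries above echo the effective compaction settings for the hbase:meta stores (minCompactSize 128 MB, maxCompactSize 8.00 EB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0). A minimal Java sketch of how those printed values map onto the standard HBase configuration keys follows; it is illustrative only, assumes hbase-common/hadoop-common on the classpath, and the class name is invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The values below are the ones printed in the CompactionConfiguration lines above.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize = 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    System.out.println("min files to compact = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}

The FlushLargeStoresPolicy line above likewise notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor, so the region falls back to the memstore flush size divided by the number of families (32.0 M).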
2024-12-08T20:46:18,777 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T20:46:18,779 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59331238, jitterRate=-0.11589565873146057}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T20:46:18,779 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T20:46:18,780 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733690778748Writing region info on filesystem at 1733690778748Initializing all the Stores at 1733690778750 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690778750Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690778751 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690778751Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690778751Cleaning up temporary data from old regions at 1733690778773 (+22 ms)Running coprocessor post-open hooks at 1733690778779 (+6 ms)Region opened successfully at 1733690778780 (+1 ms) 2024-12-08T20:46:18,787 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733690778661 2024-12-08T20:46:18,797 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T20:46:18,798 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T20:46:18,799 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=229bab1f9d30,40021,1733690776578 2024-12-08T20:46:18,801 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 229bab1f9d30,40021,1733690776578, state=OPEN 2024-12-08T20:46:18,854 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T20:46:18,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T20:46:18,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T20:46:18,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T20:46:18,854 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T20:46:18,854 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T20:46:18,854 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T20:46:18,854 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T20:46:18,855 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=229bab1f9d30,40021,1733690776578 2024-12-08T20:46:18,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T20:46:18,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=229bab1f9d30,40021,1733690776578 in 362 msec 2024-12-08T20:46:18,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T20:46:18,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 628 msec 2024-12-08T20:46:18,870 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T20:46:18,870 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T20:46:18,888 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T20:46:18,889 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=229bab1f9d30,40021,1733690776578, seqNum=-1] 2024-12-08T20:46:18,905 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T20:46:18,907 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48833, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T20:46:18,929 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3030 sec 2024-12-08T20:46:18,929 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733690778929, completionTime=-1 2024-12-08T20:46:18,932 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-08T20:46:18,932 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T20:46:18,959 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-08T20:46:18,959 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733690838959 2024-12-08T20:46:18,959 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733690898959 2024-12-08T20:46:18,959 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-08T20:46:18,961 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-08T20:46:18,968 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,39487,1733690775793-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:18,968 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,39487,1733690775793-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:18,968 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,39487,1733690775793-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:18,970 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-229bab1f9d30:39487, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:18,971 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:18,972 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
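A few entries above, PEWorker-2 fetches the hbase:meta region location from the connection registry before creating the 'default' and 'hbase' namespaces. A client can look up the same location through the public API; the sketch below is illustrative only, assumes a reachable cluster configured via hbase-site.xml, and is not part of the test itself.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // An empty row key resolves to the first (and here the only) hbase:meta region.
      HRegionLocation loc = locator.getRegionLocation(new byte[0]);
      System.out.println("hbase:meta is served by " + loc.getServerName());
    }
  }
}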
2024-12-08T20:46:18,977 DEBUG [master/229bab1f9d30:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T20:46:19,001 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.280sec 2024-12-08T20:46:19,003 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T20:46:19,004 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T20:46:19,005 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T20:46:19,005 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T20:46:19,005 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T20:46:19,006 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,39487,1733690775793-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T20:46:19,007 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,39487,1733690775793-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T20:46:19,011 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T20:46:19,012 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T20:46:19,012 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,39487,1733690775793-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
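Both the region servers (earlier in this log) and the master here report "Quota support disabled", so neither RPC nor space quotas are active for this run. For reference, a minimal sketch of the standard hbase.quota.enabled switch behind those messages; the class name is invented for the example and this is not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableQuotasSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // hbase.quota.enabled defaults to false, which is why MasterQuotaManager and
    // RegionServerRpcQuotaManager log "Quota support disabled" above.
    conf.setBoolean("hbase.quota.enabled", true);
    System.out.println("quota support enabled = " + conf.getBoolean("hbase.quota.enabled", false));
  }
}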
2024-12-08T20:46:19,073 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@296587b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T20:46:19,077 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-08T20:46:19,077 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-08T20:46:19,080 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 229bab1f9d30,39487,-1 for getting cluster id 2024-12-08T20:46:19,082 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T20:46:19,091 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cdb35848-c144-4b72-a6ae-dfd079c9c496' 2024-12-08T20:46:19,094 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T20:46:19,094 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cdb35848-c144-4b72-a6ae-dfd079c9c496" 2024-12-08T20:46:19,094 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70c57997, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T20:46:19,094 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [229bab1f9d30,39487,-1] 2024-12-08T20:46:19,097 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T20:46:19,099 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:19,100 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60278, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T20:46:19,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c738163, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T20:46:19,103 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T20:46:19,111 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=229bab1f9d30,40021,1733690776578, seqNum=-1] 2024-12-08T20:46:19,111 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T20:46:19,113 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40862, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T20:46:19,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=229bab1f9d30,39487,1733690775793 2024-12-08T20:46:19,136 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T20:46:19,140 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 229bab1f9d30,39487,1733690775793 2024-12-08T20:46:19,142 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1a6b23f5 2024-12-08T20:46:19,144 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T20:46:19,146 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T20:46:19,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T20:46:19,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-08T20:46:19,159 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T20:46:19,161 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-08T20:46:19,161 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:19,164 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T20:46:19,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T20:46:19,171 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:19,171 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
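The two warnings directly above come from the erasure-coded write path: the RS-3-2-1024k policy needs five distinct storage targets (three data plus two parity), and with only three datanodes in this mini cluster the parity blocks at indices 3 and 4 cannot be placed, hence the log's suggestion to run 'hdfs ec -verifyClusterSetup'. A small, illustrative Java check of the effective EC policy on a directory follows; it assumes fs.defaultFS points at an HDFS cluster, and the path shown is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes fs.defaultFS names the cluster under test
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs; // only valid against HDFS
      Path dir = new Path("/user/jenkins/test-data");         // hypothetical directory
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      System.out.println("EC policy on " + dir + ": "
          + (policy == null ? "none (replication)" : policy.getName()));
    }
  }
}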
2024-12-08T20:46:19,175 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:33334 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:39109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33334 dst: /127.0.0.1:39109 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:19,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-08T20:46:19,180 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T20:46:19,183 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4486c65d6d2c79cfa15ced745a32a782, NAME => 'TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc 2024-12-08T20:46:19,188 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:19,188 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-08T20:46:19,190 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:44862 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:44679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44862 dst: /127.0.0.1:44679 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:19,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-08T20:46:19,195 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T20:46:19,195 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:19,196 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 4486c65d6d2c79cfa15ced745a32a782, disabling compactions & flushes 2024-12-08T20:46:19,196 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 2024-12-08T20:46:19,196 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 2024-12-08T20:46:19,196 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. after waiting 0 ms 2024-12-08T20:46:19,196 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 2024-12-08T20:46:19,196 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 
2024-12-08T20:46:19,196 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4486c65d6d2c79cfa15ced745a32a782: Waiting for close lock at 1733690779196Disabling compacts and flushes for region at 1733690779196Disabling writes for close at 1733690779196Writing region close event to WAL at 1733690779196Closed at 1733690779196
2024-12-08T20:46:19,198 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T20:46:19,202 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733690779198"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733690779198"}]},"ts":"1733690779198"}
2024-12-08T20:46:19,206 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-12-08T20:46:19,208 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T20:46:19,210 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733690779208"}]},"ts":"1733690779208"}
2024-12-08T20:46:19,214 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta
2024-12-08T20:46:19,214 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {229bab1f9d30=0} racks are {/default-rack=0}
2024-12-08T20:46:19,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-12-08T20:46:19,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-12-08T20:46:19,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-12-08T20:46:19,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-12-08T20:46:19,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-12-08T20:46:19,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-12-08T20:46:19,216 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-12-08T20:46:19,216 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-12-08T20:46:19,216 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-12-08T20:46:19,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T20:46:19,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4486c65d6d2c79cfa15ced745a32a782, ASSIGN}]
2024-12-08T20:46:19,220 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4486c65d6d2c79cfa15ced745a32a782, ASSIGN
2024-12-08T20:46:19,222 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4486c65d6d2c79cfa15ced745a32a782, ASSIGN; state=OFFLINE, location=229bab1f9d30,40021,1733690776578; forceNewPlan=false, retain=false
2024-12-08T20:46:19,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-08T20:46:19,375 INFO [229bab1f9d30:39487 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment.
2024-12-08T20:46:19,376 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4486c65d6d2c79cfa15ced745a32a782, regionState=OPENING, regionLocation=229bab1f9d30,40021,1733690776578
2024-12-08T20:46:19,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4486c65d6d2c79cfa15ced745a32a782, ASSIGN because future has completed
2024-12-08T20:46:19,382 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4486c65d6d2c79cfa15ced745a32a782, server=229bab1f9d30,40021,1733690776578}]
2024-12-08T20:46:19,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-08T20:46:19,546 INFO [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782.
2024-12-08T20:46:19,546 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4486c65d6d2c79cfa15ced745a32a782, NAME => 'TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782.', STARTKEY => '', ENDKEY => ''}
2024-12-08T20:46:19,547 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,547 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T20:46:19,547 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,547 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,549 INFO [StoreOpener-4486c65d6d2c79cfa15ced745a32a782-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,551 INFO [StoreOpener-4486c65d6d2c79cfa15ced745a32a782-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4486c65d6d2c79cfa15ced745a32a782 columnFamilyName cf
2024-12-08T20:46:19,551 DEBUG [StoreOpener-4486c65d6d2c79cfa15ced745a32a782-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T20:46:19,552 INFO [StoreOpener-4486c65d6d2c79cfa15ced745a32a782-1 {}] regionserver.HStore(327): Store=4486c65d6d2c79cfa15ced745a32a782/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T20:46:19,553 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,554 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/default/TestHBaseWalOnEC/4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,555 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/default/TestHBaseWalOnEC/4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,556 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,556 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,559 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,564 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/default/TestHBaseWalOnEC/4486c65d6d2c79cfa15ced745a32a782/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T20:46:19,564 INFO [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4486c65d6d2c79cfa15ced745a32a782; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74496553, jitterRate=0.1100851446390152}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T20:46:19,565 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:19,565 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4486c65d6d2c79cfa15ced745a32a782: Running coprocessor pre-open hook at 1733690779547Writing region info on filesystem at 1733690779547Initializing all the Stores at 1733690779549 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690779549Cleaning up temporary data from old regions at 1733690779556 (+7 ms)Running coprocessor post-open hooks at 1733690779565 (+9 ms)Region opened successfully at 1733690779565
2024-12-08T20:46:19,567 INFO [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782., pid=6, masterSystemTime=1733690779537
2024-12-08T20:46:19,570 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782.
2024-12-08T20:46:19,570 INFO [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782.
2024-12-08T20:46:19,571 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4486c65d6d2c79cfa15ced745a32a782, regionState=OPEN, openSeqNum=2, regionLocation=229bab1f9d30,40021,1733690776578 2024-12-08T20:46:19,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4486c65d6d2c79cfa15ced745a32a782, server=229bab1f9d30,40021,1733690776578 because future has completed 2024-12-08T20:46:19,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T20:46:19,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4486c65d6d2c79cfa15ced745a32a782, server=229bab1f9d30,40021,1733690776578 in 194 msec 2024-12-08T20:46:19,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T20:46:19,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4486c65d6d2c79cfa15ced745a32a782, ASSIGN in 363 msec 2024-12-08T20:46:19,585 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T20:46:19,585 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733690779585"}]},"ts":"1733690779585"} 2024-12-08T20:46:19,588 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-08T20:46:19,590 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T20:46:19,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 437 msec 2024-12-08T20:46:19,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T20:46:19,798 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T20:46:19,798 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-08T20:46:19,799 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T20:46:19,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-08T20:46:19,804 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T20:46:19,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-12-08T20:46:19,811 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782., hostname=229bab1f9d30,40021,1733690776578, seqNum=2] 2024-12-08T20:46:19,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-08T20:46:19,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-08T20:46:19,824 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-08T20:46:19,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T20:46:19,826 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T20:46:19,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T20:46:19,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T20:46:19,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40021 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-08T20:46:19,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 2024-12-08T20:46:20,003 INFO [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4486c65d6d2c79cfa15ced745a32a782 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-08T20:46:20,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/default/TestHBaseWalOnEC/4486c65d6d2c79cfa15ced745a32a782/.tmp/cf/a51de8130d22401d9e823a8baf169f7a is 36, key is row/cf:cq/1733690779813/Put/seqid=0 2024-12-08T20:46:20,059 WARN [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-08T20:46:20,059 WARN [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:20,063 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_474312215_22 at /127.0.0.1:34008 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:39933:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34008 dst: /127.0.0.1:39933 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:20,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-08T20:46:20,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T20:46:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T20:46:20,470 WARN [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T20:46:20,470 INFO [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/default/TestHBaseWalOnEC/4486c65d6d2c79cfa15ced745a32a782/.tmp/cf/a51de8130d22401d9e823a8baf169f7a 2024-12-08T20:46:20,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/default/TestHBaseWalOnEC/4486c65d6d2c79cfa15ced745a32a782/.tmp/cf/a51de8130d22401d9e823a8baf169f7a as hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/default/TestHBaseWalOnEC/4486c65d6d2c79cfa15ced745a32a782/cf/a51de8130d22401d9e823a8baf169f7a 2024-12-08T20:46:20,521 INFO [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/default/TestHBaseWalOnEC/4486c65d6d2c79cfa15ced745a32a782/cf/a51de8130d22401d9e823a8baf169f7a, entries=1, sequenceid=5, filesize=4.7 K 2024-12-08T20:46:20,528 INFO [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 4486c65d6d2c79cfa15ced745a32a782 in 525ms, sequenceid=5, compaction requested=false 2024-12-08T20:46:20,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-08T20:46:20,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4486c65d6d2c79cfa15ced745a32a782: 2024-12-08T20:46:20,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 
2024-12-08T20:46:20,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-08T20:46:20,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-08T20:46:20,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T20:46:20,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 707 msec 2024-12-08T20:46:20,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 720 msec 2024-12-08T20:46:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-08T20:46:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-08T20:46:20,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-08T20:46:20,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-08T20:46:20,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-08T20:46:20,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-08T20:46:20,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-08T20:46:20,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-08T20:46:20,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-08T20:46:20,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-08T20:46:20,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T20:46:20,960 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T20:46:20,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T20:46:20,976 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T20:46:20,976 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T20:46:20,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-08T20:46:20,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:20,981 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T20:46:20,981 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T20:46:20,981 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=216153882, stopped=false 2024-12-08T20:46:20,981 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=229bab1f9d30,39487,1733690775793 2024-12-08T20:46:21,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:21,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:21,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:21,037 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:21,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:21,037 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:21,037 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T20:46:21,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:21,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:21,038 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T20:46:21,038 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-08T20:46:21,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T20:46:21,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T20:46:21,040 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T20:46:21,040 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T20:46:21,040 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '229bab1f9d30,35231,1733690776465' *****
2024-12-08T20:46:21,040 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-08T20:46:21,040 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '229bab1f9d30,40021,1733690776578' *****
2024-12-08T20:46:21,041 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-08T20:46:21,041 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T20:46:21,041 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '229bab1f9d30,35153,1733690776638' *****
2024-12-08T20:46:21,041 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-08T20:46:21,041 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-08T20:46:21,041 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-08T20:46:21,041 INFO [RS:0;229bab1f9d30:35231 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-08T20:46:21,041 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-08T20:46:21,041 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-08T20:46:21,041 INFO [RS:0;229bab1f9d30:35231 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-08T20:46:21,041 INFO [RS:1;229bab1f9d30:40021 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-08T20:46:21,041 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-08T20:46:21,042 INFO [RS:1;229bab1f9d30:40021 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-08T20:46:21,042 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(959): stopping server 229bab1f9d30,35231,1733690776465
2024-12-08T20:46:21,042 INFO [RS:2;229bab1f9d30:35153 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-08T20:46:21,042 INFO [RS:0;229bab1f9d30:35231 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T20:46:21,042 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T20:46:21,042 INFO [RS:2;229bab1f9d30:35153 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T20:46:21,042 INFO [RS:0;229bab1f9d30:35231 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;229bab1f9d30:35231. 2024-12-08T20:46:21,042 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(3091): Received CLOSE for 4486c65d6d2c79cfa15ced745a32a782 2024-12-08T20:46:21,042 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(959): stopping server 229bab1f9d30,35153,1733690776638 2024-12-08T20:46:21,042 INFO [RS:2;229bab1f9d30:35153 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T20:46:21,042 DEBUG [RS:0;229bab1f9d30:35231 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T20:46:21,042 DEBUG [RS:0;229bab1f9d30:35231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:21,042 INFO [RS:2;229bab1f9d30:35153 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;229bab1f9d30:35153. 
2024-12-08T20:46:21,042 DEBUG [RS:2;229bab1f9d30:35153 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T20:46:21,042 DEBUG [RS:2;229bab1f9d30:35153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:21,042 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(976): stopping server 229bab1f9d30,35231,1733690776465; all regions closed. 2024-12-08T20:46:21,043 INFO [regionserver/229bab1f9d30:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:21,043 INFO [regionserver/229bab1f9d30:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:21,043 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(959): stopping server 229bab1f9d30,40021,1733690776578 2024-12-08T20:46:21,043 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(976): stopping server 229bab1f9d30,35153,1733690776638; all regions closed. 2024-12-08T20:46:21,043 INFO [regionserver/229bab1f9d30:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:21,043 INFO [RS:1;229bab1f9d30:40021 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T20:46:21,043 INFO [RS:1;229bab1f9d30:40021 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;229bab1f9d30:40021. 
2024-12-08T20:46:21,043 DEBUG [RS:1;229bab1f9d30:40021 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T20:46:21,043 DEBUG [RS:1;229bab1f9d30:40021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:21,043 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4486c65d6d2c79cfa15ced745a32a782, disabling compactions & flushes 2024-12-08T20:46:21,044 INFO [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 2024-12-08T20:46:21,044 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T20:46:21,044 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T20:46:21,044 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 2024-12-08T20:46:21,044 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T20:46:21,044 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. after waiting 0 ms 2024-12-08T20:46:21,044 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T20:46:21,044 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 
2024-12-08T20:46:21,044 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-12-08T20:46:21,044 DEBUG [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1325): Online Regions={4486c65d6d2c79cfa15ced745a32a782=TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782., 1588230740=hbase:meta,,1.1588230740}
2024-12-08T20:46:21,044 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-08T20:46:21,045 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-08T20:46:21,045 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-08T20:46:21,045 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-08T20:46:21,045 DEBUG [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4486c65d6d2c79cfa15ced745a32a782
2024-12-08T20:46:21,045 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-08T20:46:21,045 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB
2024-12-08T20:46:21,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_1073741828_1018 (size=93)
2024-12-08T20:46:21,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_1073741827_1017 (size=93)
2024-12-08T20:46:21,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741828_1018 (size=93)
2024-12-08T20:46:21,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741827_1017 (size=93)
2024-12-08T20:46:21,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_1073741828_1018 (size=93)
2024-12-08T20:46:21,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_1073741827_1017 (size=93)
2024-12-08T20:46:21,058 DEBUG [RS:2;229bab1f9d30:35153 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/oldWALs
2024-12-08T20:46:21,058 DEBUG [RS:0;229bab1f9d30:35231 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/oldWALs
2024-12-08T20:46:21,059 INFO [RS:2;229bab1f9d30:35153 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 229bab1f9d30%2C35153%2C1733690776638:(num 1733690778093)
2024-12-08T20:46:21,059 INFO [RS:0;229bab1f9d30:35231 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 229bab1f9d30%2C35231%2C1733690776465:(num 1733690778093)
2024-12-08T20:46:21,059 DEBUG [RS:2;229bab1f9d30:35153 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T20:46:21,059 DEBUG [RS:0;229bab1f9d30:35231 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T20:46:21,059 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T20:46:21,059 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T20:46:21,059 INFO [RS:0;229bab1f9d30:35231 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-08T20:46:21,059 INFO [RS:2;229bab1f9d30:35153 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-08T20:46:21,059 INFO [RS:2;229bab1f9d30:35153 {}] hbase.ChoreService(370): Chore service for: regionserver/229bab1f9d30:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-08T20:46:21,059 INFO [RS:0;229bab1f9d30:35231 {}] hbase.ChoreService(370): Chore service for: regionserver/229bab1f9d30:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-08T20:46:21,059 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-08T20:46:21,059 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-08T20:46:21,059 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-08T20:46:21,059 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-08T20:46:21,059 INFO [regionserver/229bab1f9d30:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-08T20:46:21,059 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-08T20:46:21,059 INFO [regionserver/229bab1f9d30:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-08T20:46:21,059 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-08T20:46:21,059 INFO [RS:0;229bab1f9d30:35231 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T20:46:21,059 INFO [RS:2;229bab1f9d30:35153 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T20:46:21,060 INFO [RS:2;229bab1f9d30:35153 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35153 2024-12-08T20:46:21,060 INFO [RS:0;229bab1f9d30:35231 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35231 2024-12-08T20:46:21,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/229bab1f9d30,35153,1733690776638 2024-12-08T20:46:21,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/229bab1f9d30,35231,1733690776465 2024-12-08T20:46:21,069 INFO [RS:2;229bab1f9d30:35153 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T20:46:21,069 INFO [RS:0;229bab1f9d30:35231 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T20:46:21,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T20:46:21,072 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/default/TestHBaseWalOnEC/4486c65d6d2c79cfa15ced745a32a782/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-08T20:46:21,074 INFO [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 2024-12-08T20:46:21,074 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4486c65d6d2c79cfa15ced745a32a782: Waiting for close lock at 1733690781043Running coprocessor pre-close hooks at 1733690781043Disabling compacts and flushes for region at 1733690781043Disabling writes for close at 1733690781044 (+1 ms)Writing region close event to WAL at 1733690781049 (+5 ms)Running coprocessor post-close hooks at 1733690781073 (+24 ms)Closed at 1733690781074 (+1 ms) 2024-12-08T20:46:21,074 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782. 2024-12-08T20:46:21,075 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/.tmp/info/001202a7dccd4093b0bd30d07563626f is 153, key is TestHBaseWalOnEC,,1733690779147.4486c65d6d2c79cfa15ced745a32a782./info:regioninfo/1733690779571/Put/seqid=0 2024-12-08T20:46:21,077 WARN [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,078 WARN [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,078 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [229bab1f9d30,35231,1733690776465] 2024-12-08T20:46:21,083 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_474312215_22 at /127.0.0.1:56116 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56116 dst: /127.0.0.1:39109 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
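The DFSStripedOutputStream warnings above are a topology problem, not a data error in the write path itself: RS-3-2-1024k needs 3 data blocks plus 2 parity blocks per block group, i.e. five datanodes, while this mini cluster runs only three, so the parity blocks at indexes 3 and 4 can never be placed. Besides the suggested 'hdfs ec -verifyClusterSetup', a minimal client-side check of the same mismatch might look like the sketch below; the directory path is a placeholder and the namenode address is the one from this run.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcTopologyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // hdfs://localhost:43629 is the namenode address appearing in this test run.
        try (DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://localhost:43629"), conf)) {
          Path dir = new Path("/user/jenkins/test-data");  // hypothetical directory to inspect
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println(dir + " is replicated, no EC policy to verify");
            return;
          }
          // RS-3-2-1024k => 3 data units + 2 parity units = 5 datanodes needed per block group.
          int required = policy.getNumDataUnits() + policy.getNumParityUnits();
          DatanodeInfo[] nodes = dfs.getDataNodeStats();   // datanodes reported by the namenode
          System.out.printf("policy=%s needs %d datanodes, cluster reports %d%n",
              policy.getName(), required, nodes.length);
          if (nodes.length < required) {
            System.out.println("Not enough datanodes: some parity blocks cannot be allocated");
          }
        }
      }
    }

With three datanodes the shortfall is two, matching the two parity indexes (3 and 4) the warnings complain about.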
2024-12-08T20:46:21,086 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/229bab1f9d30,35231,1733690776465 already deleted, retry=false 2024-12-08T20:46:21,086 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 229bab1f9d30,35231,1733690776465 expired; onlineServers=2 2024-12-08T20:46:21,086 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [229bab1f9d30,35153,1733690776638] 2024-12-08T20:46:21,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-08T20:46:21,094 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/229bab1f9d30,35153,1733690776638 already deleted, retry=false 2024-12-08T20:46:21,094 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 229bab1f9d30,35153,1733690776638 expired; onlineServers=1 2024-12-08T20:46:21,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:21,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35231-0x100073b94950001, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:21,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:21,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35153-0x100073b94950003, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:21,180 INFO [RS:0;229bab1f9d30:35231 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T20:46:21,180 INFO [RS:2;229bab1f9d30:35153 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T20:46:21,180 INFO [RS:2;229bab1f9d30:35153 {}] regionserver.HRegionServer(1031): Exiting; stopping=229bab1f9d30,35153,1733690776638; zookeeper connection closed. 2024-12-08T20:46:21,180 INFO [RS:0;229bab1f9d30:35231 {}] regionserver.HRegionServer(1031): Exiting; stopping=229bab1f9d30,35231,1733690776465; zookeeper connection closed. 2024-12-08T20:46:21,181 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@64c2b1aa {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@64c2b1aa 2024-12-08T20:46:21,181 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@de4b966 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@de4b966 2024-12-08T20:46:21,245 DEBUG [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T20:46:21,446 DEBUG [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T20:46:21,489 WARN [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
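The RecoverableZooKeeper(212) entries above ("Node /hbase/draining/... already deleted, retry=false") describe a delete that treats a missing node as success rather than something to retry. A minimal sketch of that idiom with the plain ZooKeeper client, not HBase's RecoverableZooKeeper, assuming an already-connected handle:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public final class DeleteIfExists {
      /** Delete a znode, treating "already gone" as success, as in the log entries above. */
      public static void deleteIfExists(ZooKeeper zk, String path)
          throws KeeperException, InterruptedException {
        try {
          zk.delete(path, -1);                   // -1 matches any node version
        } catch (KeeperException.NoNodeException e) {
          // Someone else already deleted it: same end state, so nothing to retry.
        }
      }
    }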
2024-12-08T20:46:21,490 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/.tmp/info/001202a7dccd4093b0bd30d07563626f 2024-12-08T20:46:21,517 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/.tmp/ns/3852d3957ce74c2b98e1505b9cb09222 is 43, key is default/ns:d/1733690778913/Put/seqid=0 2024-12-08T20:46:21,519 WARN [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,519 WARN [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,522 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_474312215_22 at /127.0.0.1:49478 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:39933:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49478 dst: /127.0.0.1:39933 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:21,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-08T20:46:21,526 WARN [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T20:46:21,527 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/.tmp/ns/3852d3957ce74c2b98e1505b9cb09222 2024-12-08T20:46:21,552 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/.tmp/table/a547898e3d8f435d9fe34f3d57da9116 is 52, key is TestHBaseWalOnEC/table:state/1733690779585/Put/seqid=0 2024-12-08T20:46:21,554 WARN [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,554 WARN [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,560 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_474312215_22 at /127.0.0.1:45736 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:44679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45736 dst: /127.0.0.1:44679 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:21,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-08T20:46:21,565 WARN [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-12-08T20:46:21,565 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/.tmp/table/a547898e3d8f435d9fe34f3d57da9116 2024-12-08T20:46:21,575 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/.tmp/info/001202a7dccd4093b0bd30d07563626f as hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/info/001202a7dccd4093b0bd30d07563626f 2024-12-08T20:46:21,585 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/info/001202a7dccd4093b0bd30d07563626f, entries=10, sequenceid=11, filesize=6.5 K 2024-12-08T20:46:21,587 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/.tmp/ns/3852d3957ce74c2b98e1505b9cb09222 as hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/ns/3852d3957ce74c2b98e1505b9cb09222 2024-12-08T20:46:21,596 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/ns/3852d3957ce74c2b98e1505b9cb09222, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T20:46:21,598 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/.tmp/table/a547898e3d8f435d9fe34f3d57da9116 as hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/table/a547898e3d8f435d9fe34f3d57da9116 2024-12-08T20:46:21,607 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/table/a547898e3d8f435d9fe34f3d57da9116, entries=2, sequenceid=11, filesize=5.1 K 2024-12-08T20:46:21,608 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 563ms, sequenceid=11, compaction requested=false 2024-12-08T20:46:21,608 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T20:46:21,615 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T20:46:21,616 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T20:46:21,616 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T20:46:21,617 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733690781044Running coprocessor pre-close hooks at 1733690781044Disabling compacts and flushes for region at 1733690781044Disabling writes for close at 1733690781045 (+1 ms)Obtaining lock to block concurrent updates at 1733690781045Preparing flush snapshotting stores in 1588230740 at 1733690781045Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733690781046 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733690781047 (+1 ms)Flushing 1588230740/info: creating writer at 1733690781047Flushing 1588230740/info: appending metadata at 1733690781071 (+24 ms)Flushing 1588230740/info: closing flushed file at 1733690781071Flushing 1588230740/ns: creating writer at 1733690781502 (+431 ms)Flushing 1588230740/ns: appending metadata at 1733690781516 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733690781516Flushing 1588230740/table: creating writer at 1733690781535 (+19 ms)Flushing 1588230740/table: appending metadata at 1733690781551 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733690781551Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e0e5cd4: reopening flushed file at 1733690781573 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@778f13ea: reopening flushed file at 1733690781585 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e764646: reopening flushed file at 1733690781596 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 563ms, sequenceid=11, compaction requested=false at 1733690781608 (+12 ms)Writing region close event to WAL at 1733690781610 (+2 ms)Running coprocessor post-close hooks at 1733690781616 (+6 ms)Closed at 1733690781616 2024-12-08T20:46:21,617 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T20:46:21,646 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(976): stopping server 229bab1f9d30,40021,1733690776578; all regions closed. 
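The meta flush a few entries above follows a write-then-commit shape: each store file is first written under the region's .tmp directory and, only once complete, moved into the store directory (the HRegionFileSystem(442) "Committing ... .tmp/info/... as .../info/..." lines). The sketch below shows only that general pattern on a Hadoop FileSystem with placeholder paths; it is not the HRegionFileSystem code itself.

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class TmpThenCommit {
      /** Write the payload under a temporary path, then rename it into place as the commit step. */
      public static void writeAndCommit(FileSystem fs, byte[] payload,
                                        Path tmpFile, Path finalFile) throws java.io.IOException {
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
          out.write(payload);                        // all data lands under the temp path first
        }                                            // close() completes the file
        if (!fs.rename(tmpFile, finalFile)) {        // rename is the "Committing ... as ..." step
          throw new java.io.IOException("commit failed: " + tmpFile + " -> " + finalFile);
        }
      }
    }

A half-written file therefore never appears under the live store directory; readers only ever see files that finished the rename.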
2024-12-08T20:46:21,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741829_1019 (size=2751) 2024-12-08T20:46:21,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_1073741829_1019 (size=2751) 2024-12-08T20:46:21,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_1073741829_1019 (size=2751) 2024-12-08T20:46:21,656 DEBUG [RS:1;229bab1f9d30:40021 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/oldWALs 2024-12-08T20:46:21,656 INFO [RS:1;229bab1f9d30:40021 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 229bab1f9d30%2C40021%2C1733690776578.meta:.meta(num 1733690778692) 2024-12-08T20:46:21,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_1073741826_1016 (size=1298) 2024-12-08T20:46:21,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741826_1016 (size=1298) 2024-12-08T20:46:21,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_1073741826_1016 (size=1298) 2024-12-08T20:46:21,663 DEBUG [RS:1;229bab1f9d30:40021 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/oldWALs 2024-12-08T20:46:21,663 INFO [RS:1;229bab1f9d30:40021 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 229bab1f9d30%2C40021%2C1733690776578:(num 1733690778093) 2024-12-08T20:46:21,663 DEBUG [RS:1;229bab1f9d30:40021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:21,663 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:21,663 INFO [RS:1;229bab1f9d30:40021 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T20:46:21,663 INFO [RS:1;229bab1f9d30:40021 {}] hbase.ChoreService(370): Chore service for: regionserver/229bab1f9d30:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T20:46:21,663 INFO [RS:1;229bab1f9d30:40021 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T20:46:21,663 INFO [regionserver/229bab1f9d30:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T20:46:21,663 INFO [RS:1;229bab1f9d30:40021 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40021 2024-12-08T20:46:21,719 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/229bab1f9d30,40021,1733690776578 2024-12-08T20:46:21,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T20:46:21,720 INFO [RS:1;229bab1f9d30:40021 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T20:46:21,728 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [229bab1f9d30,40021,1733690776578] 2024-12-08T20:46:21,736 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/229bab1f9d30,40021,1733690776578 already deleted, retry=false 2024-12-08T20:46:21,736 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 229bab1f9d30,40021,1733690776578 expired; onlineServers=0 2024-12-08T20:46:21,737 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '229bab1f9d30,39487,1733690775793' ***** 2024-12-08T20:46:21,737 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T20:46:21,737 INFO [M:0;229bab1f9d30:39487 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T20:46:21,737 INFO [M:0;229bab1f9d30:39487 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T20:46:21,738 DEBUG [M:0;229bab1f9d30:39487 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T20:46:21,738 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T20:46:21,738 DEBUG [M:0;229bab1f9d30:39487 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T20:46:21,738 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733690777757 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733690777757,5,FailOnTimeoutGroup] 2024-12-08T20:46:21,738 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733690777752 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733690777752,5,FailOnTimeoutGroup] 2024-12-08T20:46:21,739 INFO [M:0;229bab1f9d30:39487 {}] hbase.ChoreService(370): Chore service for: master/229bab1f9d30:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T20:46:21,739 INFO [M:0;229bab1f9d30:39487 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T20:46:21,740 DEBUG [M:0;229bab1f9d30:39487 {}] master.HMaster(1795): Stopping service threads 2024-12-08T20:46:21,740 INFO [M:0;229bab1f9d30:39487 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T20:46:21,740 INFO [M:0;229bab1f9d30:39487 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T20:46:21,741 INFO [M:0;229bab1f9d30:39487 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T20:46:21,742 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T20:46:21,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T20:46:21,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:21,744 DEBUG [M:0;229bab1f9d30:39487 {}] zookeeper.ZKUtil(347): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T20:46:21,745 WARN [M:0;229bab1f9d30:39487 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T20:46:21,746 INFO [M:0;229bab1f9d30:39487 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/.lastflushedseqids 2024-12-08T20:46:21,755 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,755 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
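ActiveMasterManager(344) above fails to read the master address because the /hbase/master znode has just been removed as part of shutdown ("znode data == null"). A minimal existence check against the quorum used in this run (127.0.0.1:60655, base znode /hbase), using the plain ZooKeeper client rather than HBase's ZKWatcher, might look like this; it only checks existence, since decoding the stored master address would need HBase's own parsing.

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MasterZnodeCheck {
      public static void main(String[] args) throws Exception {
        // 127.0.0.1:60655 is the quorum from the log above; this watcher ignores events.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60655", 30_000, event -> { });
        try {
          Stat stat = zk.exists("/hbase/master", false);
          System.out.println(stat == null
              ? "/hbase/master is gone - no active master registered"
              : "/hbase/master present, data length=" + stat.getDataLength());
        } finally {
          zk.close();
        }
      }
    }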
2024-12-08T20:46:21,757 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:49492 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39933:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49492 dst: /127.0.0.1:39933 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:21,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-08T20:46:21,761 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T20:46:21,762 INFO [M:0;229bab1f9d30:39487 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T20:46:21,762 INFO [M:0;229bab1f9d30:39487 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T20:46:21,762 DEBUG [M:0;229bab1f9d30:39487 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T20:46:21,762 INFO [M:0;229bab1f9d30:39487 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:21,762 DEBUG [M:0;229bab1f9d30:39487 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:21,762 DEBUG [M:0;229bab1f9d30:39487 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T20:46:21,762 DEBUG [M:0;229bab1f9d30:39487 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T20:46:21,762 INFO [M:0;229bab1f9d30:39487 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-08T20:46:21,779 DEBUG [M:0;229bab1f9d30:39487 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/04b858108d0a49d595b79ca2ec05676f is 82, key is hbase:meta,,1/info:regioninfo/1733690778799/Put/seqid=0 2024-12-08T20:46:21,781 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,781 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:45756 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:44679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45756 dst: /127.0.0.1:44679 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:21,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-08T20:46:21,788 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T20:46:21,788 INFO [M:0;229bab1f9d30:39487 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/04b858108d0a49d595b79ca2ec05676f 2024-12-08T20:46:21,810 DEBUG [M:0;229bab1f9d30:39487 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/43e5d8f3c2c0417cacec29bfcc2f9347 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733690779592/Put/seqid=0 2024-12-08T20:46:21,812 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,812 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:49502 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:39933:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49502 dst: /127.0.0.1:39933 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_-9223372036854775552_1037 (size=6439) 2024-12-08T20:46:21,819 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T20:46:21,819 INFO [M:0;229bab1f9d30:39487 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/43e5d8f3c2c0417cacec29bfcc2f9347 2024-12-08T20:46:21,828 INFO [RS:1;229bab1f9d30:40021 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T20:46:21,828 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:21,828 INFO [RS:1;229bab1f9d30:40021 {}] regionserver.HRegionServer(1031): Exiting; stopping=229bab1f9d30,40021,1733690776578; zookeeper connection closed. 2024-12-08T20:46:21,828 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40021-0x100073b94950002, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:21,828 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@79a1d8fb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@79a1d8fb 2024-12-08T20:46:21,829 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-08T20:46:21,841 DEBUG [M:0;229bab1f9d30:39487 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e8025118589344f9bafd7d70a7786313 is 69, key is 229bab1f9d30,35153,1733690776638/rs:state/1733690777810/Put/seqid=0 2024-12-08T20:46:21,842 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,842 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T20:46:21,845 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1402024005_22 at /127.0.0.1:56144 [Receiving block BP-1275018439-172.17.0.2-1733690772010:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:39109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56144 dst: /127.0.0.1:39109 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T20:46:21,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-08T20:46:22,251 WARN [M:0;229bab1f9d30:39487 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T20:46:22,251 INFO [M:0;229bab1f9d30:39487 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e8025118589344f9bafd7d70a7786313 2024-12-08T20:46:22,264 DEBUG [M:0;229bab1f9d30:39487 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/04b858108d0a49d595b79ca2ec05676f as hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/04b858108d0a49d595b79ca2ec05676f 2024-12-08T20:46:22,273 INFO [M:0;229bab1f9d30:39487 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/04b858108d0a49d595b79ca2ec05676f, entries=8, sequenceid=72, filesize=5.5 K 2024-12-08T20:46:22,274 DEBUG [M:0;229bab1f9d30:39487 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/43e5d8f3c2c0417cacec29bfcc2f9347 as hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/43e5d8f3c2c0417cacec29bfcc2f9347 2024-12-08T20:46:22,282 INFO [M:0;229bab1f9d30:39487 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/43e5d8f3c2c0417cacec29bfcc2f9347, entries=8, sequenceid=72, filesize=6.3 K 2024-12-08T20:46:22,284 DEBUG [M:0;229bab1f9d30:39487 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e8025118589344f9bafd7d70a7786313 as hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e8025118589344f9bafd7d70a7786313 2024-12-08T20:46:22,292 INFO [M:0;229bab1f9d30:39487 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e8025118589344f9bafd7d70a7786313, entries=3, sequenceid=72, filesize=5.2 K 2024-12-08T20:46:22,293 INFO [M:0;229bab1f9d30:39487 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 531ms, sequenceid=72, compaction requested=false 2024-12-08T20:46:22,294 INFO [M:0;229bab1f9d30:39487 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:22,295 DEBUG [M:0;229bab1f9d30:39487 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733690781762Disabling compacts and flushes for region at 1733690781762Disabling writes for close at 1733690781762Obtaining lock to block concurrent updates at 1733690781762Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733690781762Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733690781763 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733690781764 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733690781764Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733690781779 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733690781779Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733690781796 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733690781810 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733690781810Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733690781825 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733690781840 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733690781840Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d30a913: reopening flushed file at 1733690782263 (+423 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@560dfc1b: reopening flushed file at 1733690782273 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bb33b9e: reopening flushed file at 1733690782282 (+9 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 531ms, sequenceid=72, compaction requested=false at 1733690782293 (+11 ms)Writing region close event to WAL at 1733690782294 (+1 ms)Closed at 1733690782294 2024-12-08T20:46:22,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39933 is added to blk_1073741825_1011 (size=32674) 2024-12-08T20:46:22,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39109 is added to blk_1073741825_1011 (size=32674) 2024-12-08T20:46:22,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741825_1011 (size=32674) 2024-12-08T20:46:22,299 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T20:46:22,299 INFO [M:0;229bab1f9d30:39487 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T20:46:22,299 INFO [M:0;229bab1f9d30:39487 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39487 2024-12-08T20:46:22,299 INFO [M:0;229bab1f9d30:39487 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T20:46:22,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:22,461 INFO [M:0;229bab1f9d30:39487 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T20:46:22,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39487-0x100073b94950000, quorum=127.0.0.1:60655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:22,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:22,506 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T20:46:22,506 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T20:46:22,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T20:46:22,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir/,STOPPED} 2024-12-08T20:46:22,511 WARN [BP-1275018439-172.17.0.2-1733690772010 heartbeating to localhost/127.0.0.1:43629 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T20:46:22,511 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T20:46:22,511 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T20:46:22,511 WARN [BP-1275018439-172.17.0.2-1733690772010 heartbeating to localhost/127.0.0.1:43629 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1275018439-172.17.0.2-1733690772010 (Datanode Uuid ecdacbb5-e4cb-4432-a7b7-66590527d01f) service to localhost/127.0.0.1:43629 2024-12-08T20:46:22,513 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data5/current/BP-1275018439-172.17.0.2-1733690772010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:22,513 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data6/current/BP-1275018439-172.17.0.2-1733690772010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:22,514 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T20:46:22,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:22,520 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T20:46:22,520 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T20:46:22,520 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T20:46:22,520 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir/,STOPPED} 2024-12-08T20:46:22,521 WARN [BP-1275018439-172.17.0.2-1733690772010 heartbeating to localhost/127.0.0.1:43629 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T20:46:22,521 WARN [BP-1275018439-172.17.0.2-1733690772010 heartbeating to localhost/127.0.0.1:43629 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1275018439-172.17.0.2-1733690772010 (Datanode Uuid c41ca5ac-58d8-4577-9448-6d6184fbdc01) service to localhost/127.0.0.1:43629 2024-12-08T20:46:22,522 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data3/current/BP-1275018439-172.17.0.2-1733690772010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:22,522 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data4/current/BP-1275018439-172.17.0.2-1733690772010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:22,522 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T20:46:22,522 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T20:46:22,522 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T20:46:22,524 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:22,524 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T20:46:22,524 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T20:46:22,524 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T20:46:22,525 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir/,STOPPED} 2024-12-08T20:46:22,526 WARN [BP-1275018439-172.17.0.2-1733690772010 heartbeating to localhost/127.0.0.1:43629 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T20:46:22,526 WARN [BP-1275018439-172.17.0.2-1733690772010 heartbeating to localhost/127.0.0.1:43629 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1275018439-172.17.0.2-1733690772010 (Datanode Uuid d1ee03ba-1f3d-4536-afff-e562885d5ca5) service to localhost/127.0.0.1:43629 2024-12-08T20:46:22,526 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T20:46:22,526 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T20:46:22,526 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data1/current/BP-1275018439-172.17.0.2-1733690772010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:22,527 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/cluster_e4dc1441-400b-5d5f-4f53-d5286e921cc7/data/data2/current/BP-1275018439-172.17.0.2-1733690772010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:22,527 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T20:46:22,536 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T20:46:22,537 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T20:46:22,537 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T20:46:22,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T20:46:22,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir/,STOPPED} 2024-12-08T20:46:22,545 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T20:46:22,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T20:46:22,577 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=90 (was 160), OpenFileDescriptor=447 (was 393) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=186 (was 185) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=16927 (was 17194) 2024-12-08T20:46:22,582 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=90, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=186, ProcessCount=11, AvailableMemoryMB=16927 2024-12-08T20:46:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T20:46:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.log.dir so I do NOT create it in target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c 2024-12-08T20:46:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/12144fc0-edbe-d46a-c05f-c48a92cacbb0/hadoop.tmp.dir so I do NOT create it in target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c 2024-12-08T20:46:22,582 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2, deleteOnExit=true 2024-12-08T20:46:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T20:46:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/test.cache.data in system properties and HBase conf 2024-12-08T20:46:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T20:46:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.log.dir in system properties and HBase conf 2024-12-08T20:46:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T20:46:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T20:46:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T20:46:22,583 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T20:46:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T20:46:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/nfs.dump.dir in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/java.io.tmpdir in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T20:46:22,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T20:46:22,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T20:46:22,916 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:22,921 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T20:46:22,922 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T20:46:22,922 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T20:46:22,922 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T20:46:22,923 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:22,923 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fbe2fc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.log.dir/,AVAILABLE} 2024-12-08T20:46:22,924 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14abb266{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T20:46:23,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60edc840{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/java.io.tmpdir/jetty-localhost-35387-hadoop-hdfs-3_4_1-tests_jar-_-any-13290924504934314634/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T20:46:23,016 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@362f3414{HTTP/1.1, (http/1.1)}{localhost:35387} 2024-12-08T20:46:23,016 INFO [Time-limited test {}] server.Server(415): Started @12670ms 2024-12-08T20:46:23,211 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:23,215 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T20:46:23,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T20:46:23,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T20:46:23,216 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T20:46:23,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@427407e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.log.dir/,AVAILABLE} 2024-12-08T20:46:23,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e639fe5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T20:46:23,305 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ec581d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/java.io.tmpdir/jetty-localhost-33333-hadoop-hdfs-3_4_1-tests_jar-_-any-13868192694313655474/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:23,306 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@3b495d6d{HTTP/1.1, (http/1.1)}{localhost:33333} 2024-12-08T20:46:23,306 INFO [Time-limited test {}] server.Server(415): Started @12959ms 2024-12-08T20:46:23,307 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T20:46:23,335 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:23,338 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T20:46:23,339 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T20:46:23,339 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T20:46:23,339 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T20:46:23,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63ea337e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.log.dir/,AVAILABLE} 2024-12-08T20:46:23,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16a06885{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T20:46:23,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@738646c4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/java.io.tmpdir/jetty-localhost-35635-hadoop-hdfs-3_4_1-tests_jar-_-any-7344100560013540762/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:23,429 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39163242{HTTP/1.1, (http/1.1)}{localhost:35635} 2024-12-08T20:46:23,429 INFO [Time-limited test {}] server.Server(415): Started @13083ms 2024-12-08T20:46:23,430 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T20:46:23,454 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T20:46:23,458 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T20:46:23,459 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T20:46:23,459 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T20:46:23,459 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T20:46:23,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19b3fbfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.log.dir/,AVAILABLE} 2024-12-08T20:46:23,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6861a8ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T20:46:23,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1dd777e0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/java.io.tmpdir/jetty-localhost-37933-hadoop-hdfs-3_4_1-tests_jar-_-any-11115311806271684377/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:23,563 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@d8758e7{HTTP/1.1, (http/1.1)}{localhost:37933} 2024-12-08T20:46:23,563 INFO [Time-limited test {}] server.Server(415): Started @13216ms 2024-12-08T20:46:23,564 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T20:46:24,143 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data1/current/BP-1421637431-172.17.0.2-1733690782608/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:24,143 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data2/current/BP-1421637431-172.17.0.2-1733690782608/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:24,162 WARN [Thread-506 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T20:46:24,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6672a2bd889eb258 with lease ID 0xc5df7f516f1eafbd: Processing first storage report for DS-18d7f719-0563-4ba5-814a-27d7e02f3604 from datanode DatanodeRegistration(127.0.0.1:34285, datanodeUuid=87a2727d-b3a3-4ecf-a9e8-2ee106afb1eb, infoPort=42937, infoSecurePort=0, ipcPort=35895, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608) 2024-12-08T20:46:24,164 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6672a2bd889eb258 with lease ID 0xc5df7f516f1eafbd: from storage DS-18d7f719-0563-4ba5-814a-27d7e02f3604 node DatanodeRegistration(127.0.0.1:34285, datanodeUuid=87a2727d-b3a3-4ecf-a9e8-2ee106afb1eb, infoPort=42937, infoSecurePort=0, ipcPort=35895, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T20:46:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6672a2bd889eb258 with lease ID 0xc5df7f516f1eafbd: Processing first storage report for DS-7bab1c34-9d14-4a20-bdce-9752716f566b from datanode DatanodeRegistration(127.0.0.1:34285, datanodeUuid=87a2727d-b3a3-4ecf-a9e8-2ee106afb1eb, infoPort=42937, infoSecurePort=0, ipcPort=35895, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608) 2024-12-08T20:46:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6672a2bd889eb258 with lease ID 0xc5df7f516f1eafbd: from storage DS-7bab1c34-9d14-4a20-bdce-9752716f566b node DatanodeRegistration(127.0.0.1:34285, datanodeUuid=87a2727d-b3a3-4ecf-a9e8-2ee106afb1eb, infoPort=42937, infoSecurePort=0, ipcPort=35895, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T20:46:24,256 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T20:46:24,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T20:46:24,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T20:46:24,433 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data3/current/BP-1421637431-172.17.0.2-1733690782608/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:24,433 WARN [Thread-579 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data4/current/BP-1421637431-172.17.0.2-1733690782608/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:24,448 WARN [Thread-529 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T20:46:24,451 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf37f09db3febafbb with lease ID 0xc5df7f516f1eafbe: Processing first storage report for DS-4502ed53-b9b3-45f5-b653-988b701676ce from datanode DatanodeRegistration(127.0.0.1:43581, datanodeUuid=965387b1-49ff-4198-8910-d17ab35a5c54, infoPort=40735, infoSecurePort=0, ipcPort=37775, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608) 2024-12-08T20:46:24,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf37f09db3febafbb with lease ID 0xc5df7f516f1eafbe: from storage DS-4502ed53-b9b3-45f5-b653-988b701676ce node DatanodeRegistration(127.0.0.1:43581, datanodeUuid=965387b1-49ff-4198-8910-d17ab35a5c54, infoPort=40735, infoSecurePort=0, ipcPort=37775, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T20:46:24,451 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf37f09db3febafbb with lease ID 0xc5df7f516f1eafbe: Processing first storage report for DS-327a3122-fbb9-41de-b994-0b68fc6ed098 from datanode DatanodeRegistration(127.0.0.1:43581, datanodeUuid=965387b1-49ff-4198-8910-d17ab35a5c54, infoPort=40735, infoSecurePort=0, ipcPort=37775, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608) 2024-12-08T20:46:24,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf37f09db3febafbb with lease ID 0xc5df7f516f1eafbe: from storage DS-327a3122-fbb9-41de-b994-0b68fc6ed098 node DatanodeRegistration(127.0.0.1:43581, datanodeUuid=965387b1-49ff-4198-8910-d17ab35a5c54, infoPort=40735, infoSecurePort=0, ipcPort=37775, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T20:46:24,565 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data5/current/BP-1421637431-172.17.0.2-1733690782608/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:24,565 WARN [Thread-590 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data6/current/BP-1421637431-172.17.0.2-1733690782608/current, will proceed with Du for space computation calculation, 2024-12-08T20:46:24,586 WARN [Thread-551 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T20:46:24,589 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7fdb2f9ecd0bbddf with lease ID 0xc5df7f516f1eafbf: Processing first storage report for DS-af417989-3940-4a40-9565-0839192431b7 from datanode DatanodeRegistration(127.0.0.1:38287, datanodeUuid=75be7945-4c10-4158-b655-f9c4c6b12e00, infoPort=40867, infoSecurePort=0, ipcPort=39889, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608) 2024-12-08T20:46:24,589 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7fdb2f9ecd0bbddf with lease ID 0xc5df7f516f1eafbf: from storage DS-af417989-3940-4a40-9565-0839192431b7 node DatanodeRegistration(127.0.0.1:38287, datanodeUuid=75be7945-4c10-4158-b655-f9c4c6b12e00, infoPort=40867, infoSecurePort=0, ipcPort=39889, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T20:46:24,589 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7fdb2f9ecd0bbddf with lease ID 0xc5df7f516f1eafbf: Processing first storage report for DS-67359827-3d0a-4b1d-b5cf-6c55b74991ac from datanode DatanodeRegistration(127.0.0.1:38287, datanodeUuid=75be7945-4c10-4158-b655-f9c4c6b12e00, infoPort=40867, infoSecurePort=0, ipcPort=39889, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608) 2024-12-08T20:46:24,589 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7fdb2f9ecd0bbddf with lease ID 0xc5df7f516f1eafbf: from storage DS-67359827-3d0a-4b1d-b5cf-6c55b74991ac node DatanodeRegistration(127.0.0.1:38287, datanodeUuid=75be7945-4c10-4158-b655-f9c4c6b12e00, infoPort=40867, infoSecurePort=0, ipcPort=39889, storageInfo=lv=-57;cid=testClusterID;nsid=252792909;c=1733690782608), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T20:46:24,601 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c 2024-12-08T20:46:24,628 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/zookeeper_0, clientPort=57990, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T20:46:24,629 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57990 2024-12-08T20:46:24,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,631 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741825_1001 (size=7) 2024-12-08T20:46:24,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741825_1001 (size=7) 2024-12-08T20:46:24,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741825_1001 (size=7) 2024-12-08T20:46:24,651 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8 with version=8 2024-12-08T20:46:24,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43629/user/jenkins/test-data/17fcd949-d30b-688a-e258-9e851a75e7fc/hbase-staging 2024-12-08T20:46:24,653 INFO [Time-limited test {}] client.ConnectionUtils(128): master/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T20:46:24,654 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,654 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,654 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T20:46:24,654 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,654 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T20:46:24,654 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T20:46:24,654 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T20:46:24,655 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41941 2024-12-08T20:46:24,656 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41941 connecting to ZooKeeper ensemble=127.0.0.1:57990 2024-12-08T20:46:24,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419410x0, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T20:46:24,704 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41941-0x100073bba100000 connected 2024-12-08T20:46:24,770 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,775 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,779 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:24,780 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8, hbase.cluster.distributed=false 2024-12-08T20:46:24,781 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T20:46:24,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41941 2024-12-08T20:46:24,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41941 2024-12-08T20:46:24,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41941 2024-12-08T20:46:24,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41941 2024-12-08T20:46:24,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41941 2024-12-08T20:46:24,797 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T20:46:24,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,797 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T20:46:24,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T20:46:24,797 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T20:46:24,797 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T20:46:24,798 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36579 2024-12-08T20:46:24,799 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36579 connecting to ZooKeeper ensemble=127.0.0.1:57990 2024-12-08T20:46:24,800 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:365790x0, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T20:46:24,811 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:24,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36579-0x100073bba100001 connected 2024-12-08T20:46:24,811 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T20:46:24,812 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T20:46:24,812 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T20:46:24,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T20:46:24,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36579 2024-12-08T20:46:24,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36579 2024-12-08T20:46:24,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36579 2024-12-08T20:46:24,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36579 2024-12-08T20:46:24,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36579 2024-12-08T20:46:24,829 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T20:46:24,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,830 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T20:46:24,830 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,830 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T20:46:24,830 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T20:46:24,830 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T20:46:24,830 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33651 2024-12-08T20:46:24,832 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33651 connecting to ZooKeeper ensemble=127.0.0.1:57990 2024-12-08T20:46:24,832 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336510x0, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T20:46:24,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:24,844 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33651-0x100073bba100002 connected 2024-12-08T20:46:24,845 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T20:46:24,845 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T20:46:24,846 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T20:46:24,848 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T20:46:24,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33651 2024-12-08T20:46:24,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33651 2024-12-08T20:46:24,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33651 2024-12-08T20:46:24,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33651 2024-12-08T20:46:24,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33651 2024-12-08T20:46:24,865 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T20:46:24,865 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,865 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,866 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T20:46:24,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T20:46:24,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T20:46:24,866 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T20:46:24,866 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T20:46:24,866 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43217 2024-12-08T20:46:24,868 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43217 connecting to ZooKeeper ensemble=127.0.0.1:57990 2024-12-08T20:46:24,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,869 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432170x0, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T20:46:24,877 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:432170x0, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:24,878 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43217-0x100073bba100003 connected 2024-12-08T20:46:24,878 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T20:46:24,878 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T20:46:24,879 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T20:46:24,880 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T20:46:24,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43217 2024-12-08T20:46:24,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43217 2024-12-08T20:46:24,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43217 2024-12-08T20:46:24,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43217 2024-12-08T20:46:24,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43217 2024-12-08T20:46:24,898 DEBUG [M:0;229bab1f9d30:41941 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;229bab1f9d30:41941 2024-12-08T20:46:24,898 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/229bab1f9d30,41941,1733690784653 2024-12-08T20:46:24,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:24,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:24,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:24,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:24,903 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/229bab1f9d30,41941,1733690784653 2024-12-08T20:46:24,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T20:46:24,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T20:46:24,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T20:46:24,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:24,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:24,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:24,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:24,911 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T20:46:24,912 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/229bab1f9d30,41941,1733690784653 from backup master directory 2024-12-08T20:46:24,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/229bab1f9d30,41941,1733690784653 2024-12-08T20:46:24,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:24,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:24,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:24,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T20:46:24,919 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T20:46:24,919 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=229bab1f9d30,41941,1733690784653 2024-12-08T20:46:24,927 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/hbase.id] with ID: a1c5146d-2918-4c77-b93a-6c88e3833bcb 2024-12-08T20:46:24,927 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/.tmp/hbase.id 2024-12-08T20:46:24,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741826_1002 (size=42) 2024-12-08T20:46:24,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741826_1002 (size=42) 2024-12-08T20:46:24,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741826_1002 (size=42) 2024-12-08T20:46:24,935 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/.tmp/hbase.id]:[hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/hbase.id] 2024-12-08T20:46:24,952 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T20:46:24,952 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T20:46:24,954 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
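The FSUtils entries just above record the active master publishing the cluster ID by writing hbase.id to a temporary location and then moving it into place. A small sketch of that write-then-rename pattern using the plain Hadoop FileSystem API follows; the helper name and the plain-UTF-8 payload are illustrative assumptions, not the FSUtils internals (which serialize a ClusterId structure).

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdFileSketch {
      // Hypothetical helper: write the ID under .tmp first, then rename, so readers
      // only ever observe a fully written hbase.id file at the final path.
      static void publishClusterId(Configuration conf, Path rootDir, String clusterId)
          throws Exception {
        FileSystem fs = rootDir.getFileSystem(conf);
        Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        Path target = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, target)) {
          throw new IllegalStateException("could not move " + tmp + " to " + target);
        }
      }
    }

The point of the temporary path is that the HDFS rename is a single metadata operation, so other processes never see a half-written cluster ID file.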
2024-12-08T20:46:24,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:24,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:24,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:24,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:24,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741827_1003 (size=196) 2024-12-08T20:46:24,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741827_1003 (size=196) 2024-12-08T20:46:24,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741827_1003 (size=196) 2024-12-08T20:46:24,970 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T20:46:24,971 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T20:46:24,971 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T20:46:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is 
added to blk_1073741828_1004 (size=1189) 2024-12-08T20:46:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741828_1004 (size=1189) 2024-12-08T20:46:24,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741828_1004 (size=1189) 2024-12-08T20:46:24,984 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store 2024-12-08T20:46:24,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741829_1005 (size=34) 2024-12-08T20:46:24,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741829_1005 (size=34) 2024-12-08T20:46:24,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741829_1005 (size=34) 2024-12-08T20:46:24,992 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:24,993 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T20:46:24,993 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:24,993 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T20:46:24,993 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T20:46:24,993 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:24,993 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:24,993 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733690784993Disabling compacts and flushes for region at 1733690784993Disabling writes for close at 1733690784993Writing region close event to WAL at 1733690784993Closed at 1733690784993 2024-12-08T20:46:24,994 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/.initializing 2024-12-08T20:46:24,994 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/WALs/229bab1f9d30,41941,1733690784653 2024-12-08T20:46:24,997 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C41941%2C1733690784653, suffix=, logDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/WALs/229bab1f9d30,41941,1733690784653, archiveDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/oldWALs, maxLogs=10 2024-12-08T20:46:24,998 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 229bab1f9d30%2C41941%2C1733690784653.1733690784997 2024-12-08T20:46:25,006 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/WALs/229bab1f9d30,41941,1733690784653/229bab1f9d30%2C41941%2C1733690784653.1733690784997 2024-12-08T20:46:25,008 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40867:40867),(127.0.0.1/127.0.0.1:42937:42937),(127.0.0.1/127.0.0.1:40735:40735)] 2024-12-08T20:46:25,008 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T20:46:25,009 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:25,009 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,009 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,012 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T20:46:25,012 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:25,013 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:25,013 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T20:46:25,014 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:25,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T20:46:25,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T20:46:25,017 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:25,018 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T20:46:25,018 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,020 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T20:46:25,020 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:25,020 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T20:46:25,021 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,022 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,022 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,024 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,024 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,025 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T20:46:25,027 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T20:46:25,030 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T20:46:25,030 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74401702, jitterRate=0.10867175459861755}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T20:46:25,031 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733690785009Initializing all the Stores at 1733690785010 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690785010Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690785010Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690785010Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690785010Cleaning up temporary data from old regions at 1733690785024 (+14 ms)Region opened successfully at 1733690785031 (+7 ms) 2024-12-08T20:46:25,032 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T20:46:25,035 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b0de83f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T20:46:25,036 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T20:46:25,037 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T20:46:25,037 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T20:46:25,037 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T20:46:25,038 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T20:46:25,038 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T20:46:25,038 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T20:46:25,040 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
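The 32 MB fallback reported a few entries back for master:store is simply the region flush size divided by its four column families (info, proc, rs, state), since no hbase.hregion.percolumnfamilyflush.size.lower.bound is set; it matches the flushSizeLowerBound=33554432 shown in the open journal. A quick check of that arithmetic in plain Java, with the numbers taken directly from this log:

    public class FlushLowerBoundCheck {
      public static void main(String[] args) {
        long flushSize = 134_217_728L;          // flushSize from MasterRegionFlusherAndCompactor above
        int families = 4;                       // master:store families: info, proc, rs, state
        long lowerBound = flushSize / families; // per-family flush lower bound
        System.out.println(lowerBound);         // 33554432 bytes = 32 MiB, i.e. flushSizeLowerBound
      }
    }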
2024-12-08T20:46:25,041 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T20:46:25,052 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T20:46:25,052 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T20:46:25,053 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T20:46:25,060 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T20:46:25,061 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T20:46:25,062 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T20:46:25,069 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T20:46:25,070 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T20:46:25,077 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T20:46:25,079 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T20:46:25,085 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T20:46:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-08T20:46:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,094 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=229bab1f9d30,41941,1733690784653, sessionid=0x100073bba100000, setting cluster-up flag (Was=false) 2024-12-08T20:46:25,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,136 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T20:46:25,140 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,41941,1733690784653 2024-12-08T20:46:25,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:25,185 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T20:46:25,187 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,41941,1733690784653 2024-12-08T20:46:25,188 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T20:46:25,190 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T20:46:25,191 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T20:46:25,191 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
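The repeated ZKWatcher(609) entries above are the master and the three region servers each observing the same znode changes on the 127.0.0.1:57990 quorum, for example the NodeCreated event for /hbase/running once the cluster-up flag is set. The sketch below uses the plain ZooKeeper client to register a watch and print events in the same shape; it is an illustrative, assumption-level example for readers unfamiliar with ZooKeeper watches, not HBase's ZKWatcher class.

    import java.util.concurrent.CountDownLatch;

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum address and znode paths are taken from the log; everything else is illustrative.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57990", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          // Same fields the ZKWatcher(609) lines report: type, state and path.
          System.out.println("Received ZooKeeper Event, type=" + event.getType()
              + ", state=" + event.getState() + ", path=" + event.getPath());
        });
        connected.await();

        // One-shot watch: a NodeCreated event fires when the master creates /hbase/running.
        zk.exists("/hbase/running", true);
        Thread.sleep(60_000); // keep the session open long enough to observe the event
        zk.close();
      }
    }

Plain ZooKeeper watches are one-time triggers, which is why watcher-based code such as the ZKUtil "Set watcher on existing znode" entries later in this log keeps re-registering them after events fire.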
2024-12-08T20:46:25,191 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 229bab1f9d30,41941,1733690784653 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T20:46:25,193 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T20:46:25,193 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T20:46:25,193 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T20:46:25,193 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T20:46:25,193 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/229bab1f9d30:0, corePoolSize=10, maxPoolSize=10 2024-12-08T20:46:25,193 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:25,193 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2 2024-12-08T20:46:25,193 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T20:46:25,194 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733690815194 2024-12-08T20:46:25,194 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T20:46:25,194 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T20:46:25,194 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T20:46:25,194 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T20:46:25,194 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T20:46:25,194 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T20:46:25,194 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:25,195 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T20:46:25,195 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T20:46:25,195 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T20:46:25,195 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T20:46:25,195 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T20:46:25,197 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:25,197 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T20:46:25,197 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T20:46:25,197 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T20:46:25,197 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733690785197,5,FailOnTimeoutGroup] 2024-12-08T20:46:25,198 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733690785197,5,FailOnTimeoutGroup] 2024-12-08T20:46:25,198 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:25,198 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T20:46:25,198 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:25,198 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T20:46:25,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741831_1007 (size=1321) 2024-12-08T20:46:25,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741831_1007 (size=1321) 2024-12-08T20:46:25,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741831_1007 (size=1321) 2024-12-08T20:46:25,208 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T20:46:25,208 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8 2024-12-08T20:46:25,216 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741832_1008 (size=32) 2024-12-08T20:46:25,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741832_1008 (size=32) 2024-12-08T20:46:25,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741832_1008 (size=32) 2024-12-08T20:46:25,217 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:25,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T20:46:25,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T20:46:25,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:25,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:25,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T20:46:25,222 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T20:46:25,222 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
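Each CompactionConfiguration line above carries the same tunables for every column family: minFilesToCompact 3, maxFilesToCompact 10 and a size ratio of 1.2. Roughly, the ratio test keeps a file in a minor-compaction candidate set only if it is not too much larger than the rest of the set combined. The sketch below paraphrases that documented rule under stated assumptions; it is not the actual ExploringCompactionPolicy code, and the file sizes are made up for illustration.

    import java.util.List;

    public class CompactionRatioSketch {

      // A file stays in a candidate set if size <= ratio * (sum of the other files' sizes).
      static boolean withinRatio(long fileSize, long sumOfOtherSizes, double ratio) {
        return fileSize <= ratio * sumOfOtherSizes;
      }

      public static void main(String[] args) {
        double ratio = 1.2;                                          // "ratio 1.200000" from the log
        List<Long> others = List.of(10L << 20, 12L << 20, 9L << 20); // hypothetical 10, 12 and 9 MB files
        long sum = others.stream().mapToLong(Long::longValue).sum(); // 31 MB

        System.out.println(withinRatio(40L << 20, sum, ratio)); // false: 40 MB > 1.2 * 31 MB
        System.out.println(withinRatio(20L << 20, sum, ratio)); // true: 20 MB <= 1.2 * 31 MB
      }
    }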
2024-12-08T20:46:25,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:25,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T20:46:25,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T20:46:25,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:25,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:25,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T20:46:25,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T20:46:25,227 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:25,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T20:46:25,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T20:46:25,229 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740 2024-12-08T20:46:25,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740 2024-12-08T20:46:25,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T20:46:25,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T20:46:25,232 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T20:46:25,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T20:46:25,236 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T20:46:25,236 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74500649, jitterRate=0.1101461797952652}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T20:46:25,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733690785217Initializing all the Stores at 1733690785218 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690785218Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690785218Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690785218Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690785218Cleaning up temporary data from old regions at 1733690785231 (+13 ms)Region opened successfully at 1733690785238 (+7 ms) 2024-12-08T20:46:25,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
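The desiredMaxFileSize values logged for the two regions so far (74401702 for master:store and 74500649 for hbase:meta just above) are the same base size with a per-region random jitter applied, which is why they differ slightly. Assuming the base is 64 MiB (67108864 bytes), as the logged numbers imply, the arithmetic checks out:

    public class SplitSizeJitterCheck {
      public static void main(String[] args) {
        long base = 64L << 20;                    // 67108864 bytes, implied by the logged values
        double jitterMeta = 0.1101461797952652;   // jitterRate logged for 1588230740 (hbase:meta)
        double jitterStore = 0.10867175459861755; // jitterRate logged for 1595e783... (master:store)

        System.out.println(Math.round(base * (1 + jitterMeta)));  // 74500649, matching the log
        System.out.println(Math.round(base * (1 + jitterStore))); // 74401702, matching the log
      }
    }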
2024-12-08T20:46:25,238 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T20:46:25,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T20:46:25,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T20:46:25,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T20:46:25,239 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T20:46:25,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733690785238Disabling compacts and flushes for region at 1733690785238Disabling writes for close at 1733690785238Writing region close event to WAL at 1733690785239 (+1 ms)Closed at 1733690785239 2024-12-08T20:46:25,241 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T20:46:25,241 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T20:46:25,241 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T20:46:25,243 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T20:46:25,245 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T20:46:25,286 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(746): ClusterId : a1c5146d-2918-4c77-b93a-6c88e3833bcb 2024-12-08T20:46:25,286 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(746): ClusterId : a1c5146d-2918-4c77-b93a-6c88e3833bcb 2024-12-08T20:46:25,286 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(746): ClusterId : a1c5146d-2918-4c77-b93a-6c88e3833bcb 2024-12-08T20:46:25,286 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T20:46:25,286 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T20:46:25,286 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T20:46:25,328 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T20:46:25,328 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T20:46:25,328 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T20:46:25,328 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-12-08T20:46:25,328 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T20:46:25,328 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T20:46:25,344 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T20:46:25,344 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T20:46:25,345 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T20:46:25,345 DEBUG [RS:1;229bab1f9d30:33651 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@169d1d82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T20:46:25,345 DEBUG [RS:0;229bab1f9d30:36579 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1080fefb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T20:46:25,345 DEBUG [RS:2;229bab1f9d30:43217 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b142989, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T20:46:25,354 DEBUG [RS:0;229bab1f9d30:36579 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;229bab1f9d30:36579 2024-12-08T20:46:25,354 DEBUG [RS:1;229bab1f9d30:33651 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;229bab1f9d30:33651 2024-12-08T20:46:25,355 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T20:46:25,355 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T20:46:25,355 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T20:46:25,355 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T20:46:25,355 DEBUG [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T20:46:25,355 DEBUG [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(832): About to register with Master. 
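From here on the log interleaves the master thread with three region-server startup threads (RS:0, RS:1 and RS:2), so it can be easier to follow one server at a time by splitting the stream back into individual entries and filtering on the thread name. The parser below is an illustrative sketch: the regular expression is fitted to the entry shape visible in this log (timestamp, level, [thread], logger(line): message), not to any official log4j layout, and the sample string is a shortened excerpt.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class LogEntrySplitSketch {
      // Shape: 2024-12-08T20:46:25,356 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(2659): message
      private static final Pattern ENTRY = Pattern.compile(
          "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3})\\s+"         // timestamp
          + "(TRACE|DEBUG|INFO|WARN|ERROR)\\s+"                            // level
          + "\\[([^\\]]*)\\]\\s+"                                          // thread, inside [...]
          + "(\\S+?)\\((\\d+)\\):\\s+"                                     // logger.Class(line):
          + "(.*?)(?=\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}|$)", // message up to the next timestamp
          Pattern.DOTALL);

      public static void main(String[] args) {
        String run = "2024-12-08T20:46:25,356 INFO [RS:1;229bab1f9d30:33651 {}] "
            + "regionserver.HRegionServer(2659): reportForDuty to master=229bab1f9d30,41941,1733690784653 "
            + "2024-12-08T20:46:25,358 DEBUG [RS:2;229bab1f9d30:43217 {}] "
            + "regionserver.ShutdownHook(81): Installed shutdown hook thread";

        Matcher m = ENTRY.matcher(run);
        while (m.find()) {
          if (m.group(3).startsWith("RS:1")) { // keep only RS:1's entries
            System.out.println(m.group(1) + " " + m.group(2) + " " + m.group(6).trim());
          }
        }
      }
    }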
2024-12-08T20:46:25,356 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(2659): reportForDuty to master=229bab1f9d30,41941,1733690784653 with port=33651, startcode=1733690784829
2024-12-08T20:46:25,356 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(2659): reportForDuty to master=229bab1f9d30,41941,1733690784653 with port=36579, startcode=1733690784797
2024-12-08T20:46:25,356 DEBUG [RS:1;229bab1f9d30:33651 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T20:46:25,356 DEBUG [RS:0;229bab1f9d30:36579 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T20:46:25,358 DEBUG [RS:2;229bab1f9d30:43217 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;229bab1f9d30:43217
2024-12-08T20:46:25,358 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56515, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T20:46:25,359 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-08T20:46:25,359 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-08T20:46:25,359 DEBUG [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-08T20:46:25,359 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41941 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 229bab1f9d30,33651,1733690784829
2024-12-08T20:46:25,359 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41941 {}] master.ServerManager(517): Registering regionserver=229bab1f9d30,33651,1733690784829
2024-12-08T20:46:25,359 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42937, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T20:46:25,360 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(2659): reportForDuty to master=229bab1f9d30,41941,1733690784653 with port=43217, startcode=1733690784865
2024-12-08T20:46:25,360 DEBUG [RS:2;229bab1f9d30:43217 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T20:46:25,362 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48795, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T20:46:25,362 DEBUG [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8
2024-12-08T20:46:25,362 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41941 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 229bab1f9d30,36579,1733690784797
2024-12-08T20:46:25,362 DEBUG [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35551
2024-12-08T20:46:25,362 DEBUG [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-08T20:46:25,362 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41941 {}] master.ServerManager(517): Registering regionserver=229bab1f9d30,36579,1733690784797
2024-12-08T20:46:25,365 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41941 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 229bab1f9d30,43217,1733690784865
2024-12-08T20:46:25,365 DEBUG [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8
2024-12-08T20:46:25,365 DEBUG [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35551
2024-12-08T20:46:25,365 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41941 {}] master.ServerManager(517): Registering regionserver=229bab1f9d30,43217,1733690784865
2024-12-08T20:46:25,365 DEBUG [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-08T20:46:25,367 DEBUG [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8
2024-12-08T20:46:25,367 DEBUG [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35551
2024-12-08T20:46:25,367 DEBUG [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-08T20:46:25,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-08T20:46:25,395 WARN [229bab1f9d30:41941 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-12-08T20:46:25,396 DEBUG [RS:1;229bab1f9d30:33651 {}] zookeeper.ZKUtil(111): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/229bab1f9d30,33651,1733690784829
2024-12-08T20:46:25,396 WARN [RS:1;229bab1f9d30:33651 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
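The three "Config from master" keys above (hbase.rootdir, fs.defaultFS, hbase.master.info.port) are ordinary entries in the HBase Configuration object. A minimal sketch of setting and reading them through the public HBaseConfiguration API follows; the values are only the ones reported in this run, and the class name ShowClusterConfig is illustrative, not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: mirrors the "Config from master" keys reported in the log above.
public class ShowClusterConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values copied from this test run's log entries.
    conf.set("hbase.rootdir", "hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8");
    conf.set("fs.defaultFS", "hdfs://localhost:35551");
    conf.setInt("hbase.master.info.port", -1); // -1 disables the master info UI in this run
    System.out.println("hbase.rootdir = " + conf.get("hbase.rootdir"));
    System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));
    System.out.println("hbase.master.info.port = " + conf.getInt("hbase.master.info.port", 16010));
  }
}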
2024-12-08T20:46:25,396 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [229bab1f9d30,33651,1733690784829]
2024-12-08T20:46:25,396 INFO [RS:1;229bab1f9d30:33651 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-08T20:46:25,396 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [229bab1f9d30,43217,1733690784865]
2024-12-08T20:46:25,396 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [229bab1f9d30,36579,1733690784797]
2024-12-08T20:46:25,396 DEBUG [RS:0;229bab1f9d30:36579 {}] zookeeper.ZKUtil(111): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/229bab1f9d30,36579,1733690784797
2024-12-08T20:46:25,396 DEBUG [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,33651,1733690784829
2024-12-08T20:46:25,396 WARN [RS:0;229bab1f9d30:36579 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-08T20:46:25,396 INFO [RS:0;229bab1f9d30:36579 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-08T20:46:25,396 DEBUG [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,36579,1733690784797
2024-12-08T20:46:25,396 DEBUG [RS:2;229bab1f9d30:43217 {}] zookeeper.ZKUtil(111): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/229bab1f9d30,43217,1733690784865
2024-12-08T20:46:25,396 WARN [RS:2;229bab1f9d30:43217 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-08T20:46:25,396 INFO [RS:2;229bab1f9d30:43217 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-08T20:46:25,397 DEBUG [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,43217,1733690784865
2024-12-08T20:46:25,400 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-08T20:46:25,400 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-08T20:46:25,400 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-08T20:46:25,402 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-08T20:46:25,402 INFO [RS:1;229bab1f9d30:33651 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-08T20:46:25,403 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,403 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-08T20:46:25,404 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-08T20:46:25,404 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
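Each region server above instantiates FSHLogProvider as its WALProvider. A minimal sketch of the configuration knob commonly understood to select that provider follows; the hbase.wal.provider key and its "filesystem" value are an assumption about the configuration surface rather than something this log states.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch under an assumption: "filesystem" is the hbase.wal.provider value that maps to
// FSHLogProvider; this pairing is my understanding, not something stated in the log.
public class WalProviderSetting {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem"); // assumed to select FSHLogProvider
    System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
  }
}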
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,405 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-08T20:46:25,405 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,406 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3
2024-12-08T20:46:25,406 DEBUG [RS:1;229bab1f9d30:33651 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3
2024-12-08T20:46:25,407 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-08T20:46:25,407 INFO [RS:2;229bab1f9d30:43217 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-08T20:46:25,407 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,410 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-08T20:46:25,410 INFO [RS:0;229bab1f9d30:36579 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-08T20:46:25,410 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,411 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-08T20:46:25,411 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-08T20:46:25,411 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,411 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,411 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,411 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,411 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,411 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,411 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,411 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2
2024-12-08T20:46:25,411 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,411 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,411 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,412 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
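The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M. Assuming the usual lower-limit fraction of 0.95 (the hbase.regionserver.global.memstore.size.lower.limit default), the low mark is simply limit times 0.95, which reproduces the logged figures; the sketch below only performs that arithmetic.

// Arithmetic sketch only; the 0.95 fraction (hbase.regionserver.global.memstore.size.lower.limit)
// is an assumed default, and the 880 MB figure comes from the MemStoreFlusher log lines above.
public class MemStoreLowMark {
  public static void main(String[] args) {
    long globalLimitMb = 880;          // from the log
    double lowerLimitFraction = 0.95;  // assumed default
    long lowMarkMb = Math.round(globalLimitMb * lowerLimitFraction);
    System.out.println("low-water mark = " + lowMarkMb + " MB"); // prints 836, matching the log
  }
}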
2024-12-08T20:46:25,412 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,412 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-08T20:46:25,412 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,33651,1733690784829-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-08T20:46:25,412 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,412 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3
2024-12-08T20:46:25,412 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 DEBUG [RS:2;229bab1f9d30:43217 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3
2024-12-08T20:46:25,412 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,412 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2
2024-12-08T20:46:25,412 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,413 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,413 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,413 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,413 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,413 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1
2024-12-08T20:46:25,413 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3
2024-12-08T20:46:25,413 DEBUG [RS:0;229bab1f9d30:36579 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3
2024-12-08T20:46:25,416 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,416 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,36579,1733690784797-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-08T20:46:25,417 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,43217,1733690784865-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-08T20:46:25,428 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-08T20:46:25,428 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,33651,1733690784829-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,428 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,428 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.Replication(171): 229bab1f9d30,33651,1733690784829 started
2024-12-08T20:46:25,436 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-08T20:46:25,436 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-08T20:46:25,436 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,43217,1733690784865-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,436 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,36579,1733690784797-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,437 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,437 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,437 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.Replication(171): 229bab1f9d30,43217,1733690784865 started
2024-12-08T20:46:25,437 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.Replication(171): 229bab1f9d30,36579,1733690784797 started
2024-12-08T20:46:25,441 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,441 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(1482): Serving as 229bab1f9d30,33651,1733690784829, RpcServer on 229bab1f9d30/172.17.0.2:33651, sessionid=0x100073bba100002
2024-12-08T20:46:25,441 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-08T20:46:25,441 DEBUG [RS:1;229bab1f9d30:33651 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 229bab1f9d30,33651,1733690784829
2024-12-08T20:46:25,441 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,33651,1733690784829'
2024-12-08T20:46:25,441 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-08T20:46:25,442 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-08T20:46:25,442 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-08T20:46:25,443 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-08T20:46:25,443 DEBUG [RS:1;229bab1f9d30:33651 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 229bab1f9d30,33651,1733690784829
2024-12-08T20:46:25,443 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,33651,1733690784829'
2024-12-08T20:46:25,443 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-08T20:46:25,443 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T20:46:25,443 DEBUG [RS:1;229bab1f9d30:33651 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T20:46:25,443 INFO [RS:1;229bab1f9d30:33651 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T20:46:25,444 INFO [RS:1;229bab1f9d30:33651 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T20:46:25,456 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,456 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,456 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(1482): Serving as 229bab1f9d30,36579,1733690784797, RpcServer on 229bab1f9d30/172.17.0.2:36579, sessionid=0x100073bba100001
2024-12-08T20:46:25,456 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(1482): Serving as 229bab1f9d30,43217,1733690784865, RpcServer on 229bab1f9d30/172.17.0.2:43217, sessionid=0x100073bba100003
2024-12-08T20:46:25,456 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-08T20:46:25,456 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-08T20:46:25,457 DEBUG [RS:2;229bab1f9d30:43217 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 229bab1f9d30,43217,1733690784865
2024-12-08T20:46:25,457 DEBUG [RS:0;229bab1f9d30:36579 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 229bab1f9d30,36579,1733690784797
2024-12-08T20:46:25,457 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,43217,1733690784865'
2024-12-08T20:46:25,457 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,36579,1733690784797'
2024-12-08T20:46:25,457 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-08T20:46:25,457 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-08T20:46:25,457 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-08T20:46:25,457 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-08T20:46:25,458 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-08T20:46:25,458 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-08T20:46:25,458 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-08T20:46:25,458 DEBUG [RS:0;229bab1f9d30:36579 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 229bab1f9d30,36579,1733690784797
2024-12-08T20:46:25,458 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-08T20:46:25,458 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,36579,1733690784797'
2024-12-08T20:46:25,458 DEBUG [RS:2;229bab1f9d30:43217 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 229bab1f9d30,43217,1733690784865
2024-12-08T20:46:25,458 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-08T20:46:25,458 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,43217,1733690784865'
2024-12-08T20:46:25,458 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-08T20:46:25,458 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T20:46:25,459 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T20:46:25,459 DEBUG [RS:0;229bab1f9d30:36579 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T20:46:25,459 INFO [RS:0;229bab1f9d30:36579 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T20:46:25,459 INFO [RS:0;229bab1f9d30:36579 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T20:46:25,459 DEBUG [RS:2;229bab1f9d30:43217 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T20:46:25,459 INFO [RS:2;229bab1f9d30:43217 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T20:46:25,459 INFO [RS:2;229bab1f9d30:43217 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T20:46:25,549 INFO [RS:1;229bab1f9d30:33651 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C33651%2C1733690784829, suffix=, logDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,33651,1733690784829, archiveDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/oldWALs, maxLogs=32
2024-12-08T20:46:25,553 INFO [RS:1;229bab1f9d30:33651 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 229bab1f9d30%2C33651%2C1733690784829.1733690785553
2024-12-08T20:46:25,561 INFO [RS:2;229bab1f9d30:43217 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C43217%2C1733690784865, suffix=, logDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,43217,1733690784865, archiveDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/oldWALs, maxLogs=32
2024-12-08T20:46:25,561 INFO [RS:0;229bab1f9d30:36579 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C36579%2C1733690784797, suffix=, logDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,36579,1733690784797, archiveDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/oldWALs, maxLogs=32
2024-12-08T20:46:25,562 INFO [RS:1;229bab1f9d30:33651 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,33651,1733690784829/229bab1f9d30%2C33651%2C1733690784829.1733690785553
2024-12-08T20:46:25,563 INFO [RS:0;229bab1f9d30:36579 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 229bab1f9d30%2C36579%2C1733690784797.1733690785563
2024-12-08T20:46:25,563 INFO [RS:2;229bab1f9d30:43217 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 229bab1f9d30%2C43217%2C1733690784865.1733690785563
2024-12-08T20:46:25,565 DEBUG [RS:1;229bab1f9d30:33651 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42937:42937),(127.0.0.1/127.0.0.1:40867:40867),(127.0.0.1/127.0.0.1:40735:40735)]
2024-12-08T20:46:25,575 INFO [RS:2;229bab1f9d30:43217 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,43217,1733690784865/229bab1f9d30%2C43217%2C1733690784865.1733690785563
2024-12-08T20:46:25,575 INFO [RS:0;229bab1f9d30:36579 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,36579,1733690784797/229bab1f9d30%2C36579%2C1733690784797.1733690785563
2024-12-08T20:46:25,579 DEBUG [RS:2;229bab1f9d30:43217 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42937:42937),(127.0.0.1/127.0.0.1:40867:40867),(127.0.0.1/127.0.0.1:40735:40735)]
2024-12-08T20:46:25,579 DEBUG [RS:0;229bab1f9d30:36579 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42937:42937),(127.0.0.1/127.0.0.1:40867:40867),(127.0.0.1/127.0.0.1:40735:40735)]
2024-12-08T20:46:25,645 DEBUG [229bab1f9d30:41941 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3
2024-12-08T20:46:25,646 DEBUG [229bab1f9d30:41941 {}] balancer.BalancerClusterState(204): Hosts are {229bab1f9d30=0} racks are {/default-rack=0}
2024-12-08T20:46:25,651 DEBUG [229bab1f9d30:41941 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-12-08T20:46:25,651 DEBUG [229bab1f9d30:41941 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-12-08T20:46:25,651 DEBUG [229bab1f9d30:41941 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-12-08T20:46:25,651 DEBUG [229bab1f9d30:41941 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-12-08T20:46:25,651 DEBUG [229bab1f9d30:41941 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-12-08T20:46:25,651 DEBUG [229bab1f9d30:41941 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-12-08T20:46:25,651 INFO [229bab1f9d30:41941 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-12-08T20:46:25,651 INFO [229bab1f9d30:41941 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-12-08T20:46:25,651 INFO [229bab1f9d30:41941 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-12-08T20:46:25,651 DEBUG [229bab1f9d30:41941 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T20:46:25,652 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=229bab1f9d30,33651,1733690784829
2024-12-08T20:46:25,655 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 229bab1f9d30,33651,1733690784829, state=OPENING
2024-12-08T20:46:25,677 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-08T20:46:25,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T20:46:25,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T20:46:25,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T20:46:25,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T20:46:25,688 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T20:46:25,688 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-08T20:46:25,688 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T20:46:25,688 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T20:46:25,688 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=229bab1f9d30,33651,1733690784829}]
2024-12-08T20:46:25,689 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T20:46:25,844 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-08T20:46:25,849 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59537, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-08T20:46:25,856 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-08T20:46:25,857 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-08T20:46:25,860 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C33651%2C1733690784829.meta, suffix=.meta, logDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,33651,1733690784829, archiveDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/oldWALs, maxLogs=32
2024-12-08T20:46:25,861 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 229bab1f9d30%2C33651%2C1733690784829.meta.1733690785861.meta
2024-12-08T20:46:25,870 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/WALs/229bab1f9d30,33651,1733690784829/229bab1f9d30%2C33651%2C1733690784829.meta.1733690785861.meta
2024-12-08T20:46:25,872 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42937:42937),(127.0.0.1/127.0.0.1:40735:40735),(127.0.0.1/127.0.0.1:40867:40867)]
2024-12-08T20:46:25,873 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-08T20:46:25,873 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-08T20:46:25,873 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-08T20:46:25,874 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-08T20:46:25,874 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-08T20:46:25,874 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T20:46:25,874 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-12-08T20:46:25,874 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-12-08T20:46:25,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-08T20:46:25,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-08T20:46:25,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T20:46:25,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T20:46:25,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-08T20:46:25,878 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-08T20:46:25,879 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T20:46:25,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T20:46:25,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-08T20:46:25,880 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-08T20:46:25,880 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T20:46:25,880 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T20:46:25,881 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-08T20:46:25,881 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-08T20:46:25,881 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T20:46:25,882 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T20:46:25,882 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-08T20:46:25,883 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740
2024-12-08T20:46:25,884 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740
2024-12-08T20:46:25,886 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-08T20:46:25,886 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-08T20:46:25,886 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
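The WAL configuration entries above report blocksize=256 MB, rollsize=128 MB and maxLogs=32. Assuming the roll size is derived as blocksize times a 0.5 roll multiplier (the hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs key names here are assumptions about the configuration surface, not taken from this log), the arithmetic below reproduces the logged values.

// Arithmetic sketch only; blocksize and maxLogs come from the WAL configuration lines above,
// while the 0.5 roll multiplier (hbase.regionserver.logroll.multiplier) is an assumed default.
public class WalRollSize {
  public static void main(String[] args) {
    long blockSizeMb = 256;      // from the log
    double rollMultiplier = 0.5; // assumed default
    long rollSizeMb = (long) (blockSizeMb * rollMultiplier);
    System.out.println("roll size = " + rollSizeMb + " MB, maxLogs = 32"); // 128 MB, matching the log
  }
}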
2024-12-08T20:46:25,888 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-08T20:46:25,889 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65861989, jitterRate=-0.018579885363578796}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-08T20:46:25,889 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-12-08T20:46:25,891 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733690785874Writing region info on filesystem at 1733690785874Initializing all the Stores at 1733690785875 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690785875Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690785875Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690785875Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733690785875Cleaning up temporary data from old regions at 1733690785886 (+11 ms)Running coprocessor post-open hooks at 1733690785889 (+3 ms)Region opened successfully at 1733690785890 (+1 ms)
2024-12-08T20:46:25,892 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733690785843
2024-12-08T20:46:25,895 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-08T20:46:25,895 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-12-08T20:46:25,896 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=229bab1f9d30,33651,1733690784829
2024-12-08T20:46:25,897 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 229bab1f9d30,33651,1733690784829, state=OPEN
2024-12-08T20:46:25,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T20:46:25,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T20:46:25,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T20:46:25,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T20:46:25,927 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=229bab1f9d30,33651,1733690784829
2024-12-08T20:46:25,927 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T20:46:25,927 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T20:46:25,927 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T20:46:25,927 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T20:46:25,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-12-08T20:46:25,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=229bab1f9d30,33651,1733690784829 in 239 msec
2024-12-08T20:46:25,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-12-08T20:46:25,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 690 msec
2024-12-08T20:46:25,936 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-08T20:46:25,936 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-12-08T20:46:25,938 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-08T20:46:25,938 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=229bab1f9d30,33651,1733690784829, seqNum=-1]
2024-12-08T20:46:25,939 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T20:46:25,940 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45257, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T20:46:25,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 759 msec
2024-12-08T20:46:25,950 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733690785950, completionTime=-1
2024-12-08T20:46:25,950 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running
2024-12-08T20:46:25,950 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-12-08T20:46:25,953 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3
2024-12-08T20:46:25,953 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733690845953
2024-12-08T20:46:25,953 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733690905953
2024-12-08T20:46:25,953 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec
2024-12-08T20:46:25,954 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,41941,1733690784653-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,954 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,41941,1733690784653-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,954 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,41941,1733690784653-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,954 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-229bab1f9d30:41941, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,954 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-08T20:46:25,955 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
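At this point hbase:meta is open on 229bab1f9d30,33651 and its location has been published for clients, as the "fetched meta region location" entries show. A minimal client-side sketch of looking that location up through the public RegionLocator API follows; it is illustrative rather than part of the test, and it assumes a reachable cluster is configured on the client's classpath.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Illustrative lookup of the hbase:meta location from a client, not part of the test run itself.
public class MetaLocationLookup {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // assumes a reachable cluster is configured
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(new byte[0]); // empty row: first (only) meta region
      System.out.println("hbase:meta is served by " + loc.getServerName());
    }
  }
}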
2024-12-08T20:46:25,957 DEBUG [master/229bab1f9d30:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T20:46:25,960 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.041sec 2024-12-08T20:46:25,961 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T20:46:25,961 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T20:46:25,961 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T20:46:25,961 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T20:46:25,961 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T20:46:25,961 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,41941,1733690784653-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T20:46:25,961 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,41941,1733690784653-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T20:46:25,964 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T20:46:25,964 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T20:46:25,964 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,41941,1733690784653-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
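Editorial note: the sequence above, where the HMaster finishes initialization only after all three region servers report in, is what an HBaseTestingUtil-driven mini cluster start produces. The following is a minimal, illustrative sketch of such a setup under JUnit 4, not the actual TestHBaseWalOnEC source; the class name and UTIL field are placeholders, and the per-test @Before placement is an assumption inferred from the tearDown call stacks later in this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.Before;

public class WalOnEcSetupSketch {
  // Assumption: HBaseTestingUtil is the current name of the classic HBaseTestingUtility.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Three region servers, matching "expected min=3 server(s), max=3 server(s)" above.
    // The real test presumably also places the WAL directory on an erasure-coded HDFS
    // path (hence the test name); that part of the setup is omitted here.
    UTIL.startMiniCluster(3);
  }
}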
2024-12-08T20:46:25,986 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a6e0fe9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T20:46:25,986 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 229bab1f9d30,41941,-1 for getting cluster id 2024-12-08T20:46:25,986 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T20:46:25,988 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a1c5146d-2918-4c77-b93a-6c88e3833bcb' 2024-12-08T20:46:25,988 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T20:46:25,988 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a1c5146d-2918-4c77-b93a-6c88e3833bcb" 2024-12-08T20:46:25,989 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@edcd13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T20:46:25,989 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [229bab1f9d30,41941,-1] 2024-12-08T20:46:25,989 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T20:46:25,989 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:25,991 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39736, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T20:46:25,993 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1301f665, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T20:46:25,993 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T20:46:25,994 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=229bab1f9d30,33651,1733690784829, seqNum=-1] 2024-12-08T20:46:25,994 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T20:46:25,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44510, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T20:46:25,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=229bab1f9d30,41941,1733690784653 2024-12-08T20:46:25,999 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T20:46:26,001 DEBUG 
[RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 229bab1f9d30,41941,1733690784653 2024-12-08T20:46:26,001 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@702fa1f2 2024-12-08T20:46:26,001 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T20:46:26,003 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39738, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T20:46:26,004 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T20:46:26,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-08T20:46:26,008 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T20:46:26,009 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:26,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-08T20:46:26,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T20:46:26,010 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T20:46:26,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741837_1013 (size=392) 2024-12-08T20:46:26,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741837_1013 (size=392) 2024-12-08T20:46:26,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741837_1013 (size=392) 2024-12-08T20:46:26,023 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 86b33f0d4bbcb154c68285d1189f3374, NAME => 'TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8 2024-12-08T20:46:26,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741838_1014 (size=51) 2024-12-08T20:46:26,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741838_1014 (size=51) 2024-12-08T20:46:26,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741838_1014 (size=51) 2024-12-08T20:46:26,035 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:26,035 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 86b33f0d4bbcb154c68285d1189f3374, disabling compactions & flushes 2024-12-08T20:46:26,035 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:26,035 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:26,035 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. after waiting 0 ms 2024-12-08T20:46:26,035 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:26,035 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:26,035 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 86b33f0d4bbcb154c68285d1189f3374: Waiting for close lock at 1733690786035Disabling compacts and flushes for region at 1733690786035Disabling writes for close at 1733690786035Writing region close event to WAL at 1733690786035Closed at 1733690786035 2024-12-08T20:46:26,037 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T20:46:26,037 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733690786037"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733690786037"}]},"ts":"1733690786037"} 2024-12-08T20:46:26,041 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
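Editorial note: the CreateTableProcedure above was triggered by a client-side createTable request for a table with a single 'cf' family and REGION_REPLICATION 1, as echoed in the descriptor the master logs. A minimal sketch of that request using the standard Admin/TableDescriptorBuilder API follows; the connection object is a placeholder (e.g. obtained from the test's mini cluster), and this is illustrative, not the test's actual code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateTableSketch {
  // 'connection' is assumed to come from the test's mini cluster (e.g. UTIL.getConnection()).
  static void createTestTable(Connection connection) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        // Mirrors the descriptor in the log: a single family 'cf' with VERSIONS => '1',
        // everything else left at its defaults (NONE compression, 64 KB blocks, etc.).
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .build())
        .build();
    try (Admin admin = connection.getAdmin()) {
      admin.createTable(desc);
    }
  }
}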
2024-12-08T20:46:26,043 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T20:46:26,043 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733690786043"}]},"ts":"1733690786043"} 2024-12-08T20:46:26,047 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-08T20:46:26,047 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {229bab1f9d30=0} racks are {/default-rack=0} 2024-12-08T20:46:26,048 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-08T20:46:26,048 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-08T20:46:26,048 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-08T20:46:26,048 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-08T20:46:26,048 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-08T20:46:26,048 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-08T20:46:26,048 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-08T20:46:26,048 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-08T20:46:26,049 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-08T20:46:26,049 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-08T20:46:26,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=86b33f0d4bbcb154c68285d1189f3374, ASSIGN}] 2024-12-08T20:46:26,051 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=86b33f0d4bbcb154c68285d1189f3374, ASSIGN 2024-12-08T20:46:26,053 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=86b33f0d4bbcb154c68285d1189f3374, ASSIGN; state=OFFLINE, location=229bab1f9d30,43217,1733690784865; forceNewPlan=false, retain=false 2024-12-08T20:46:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T20:46:26,129 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T20:46:26,129 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T20:46:26,131 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T20:46:26,131 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T20:46:26,131 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T20:46:26,131 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T20:46:26,204 INFO [229bab1f9d30:41941 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-08T20:46:26,204 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=86b33f0d4bbcb154c68285d1189f3374, regionState=OPENING, regionLocation=229bab1f9d30,43217,1733690784865 2024-12-08T20:46:26,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=86b33f0d4bbcb154c68285d1189f3374, ASSIGN because future has completed 2024-12-08T20:46:26,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 86b33f0d4bbcb154c68285d1189f3374, server=229bab1f9d30,43217,1733690784865}] 2024-12-08T20:46:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T20:46:26,366 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T20:46:26,369 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45859, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T20:46:26,376 INFO [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 
2024-12-08T20:46:26,377 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 86b33f0d4bbcb154c68285d1189f3374, NAME => 'TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374.', STARTKEY => '', ENDKEY => ''} 2024-12-08T20:46:26,377 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,377 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T20:46:26,378 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,378 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,380 INFO [StoreOpener-86b33f0d4bbcb154c68285d1189f3374-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,382 INFO [StoreOpener-86b33f0d4bbcb154c68285d1189f3374-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 86b33f0d4bbcb154c68285d1189f3374 columnFamilyName cf 2024-12-08T20:46:26,382 DEBUG [StoreOpener-86b33f0d4bbcb154c68285d1189f3374-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T20:46:26,382 INFO [StoreOpener-86b33f0d4bbcb154c68285d1189f3374-1 {}] regionserver.HStore(327): Store=86b33f0d4bbcb154c68285d1189f3374/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T20:46:26,383 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,383 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/default/TestHBaseWalOnEC/86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,384 DEBUG 
[RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/default/TestHBaseWalOnEC/86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,384 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,384 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,386 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,389 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/default/TestHBaseWalOnEC/86b33f0d4bbcb154c68285d1189f3374/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T20:46:26,390 INFO [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 86b33f0d4bbcb154c68285d1189f3374; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71123987, jitterRate=0.05982999503612518}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T20:46:26,390 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:26,390 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 86b33f0d4bbcb154c68285d1189f3374: Running coprocessor pre-open hook at 1733690786378Writing region info on filesystem at 1733690786378Initializing all the Stores at 1733690786379 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733690786380 (+1 ms)Cleaning up temporary data from old regions at 1733690786384 (+4 ms)Running coprocessor post-open hooks at 1733690786390 (+6 ms)Region opened successfully at 1733690786390 2024-12-08T20:46:26,392 INFO [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374., pid=6, masterSystemTime=1733690786366 2024-12-08T20:46:26,395 DEBUG [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:26,396 INFO [RS_OPEN_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 
2024-12-08T20:46:26,397 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=86b33f0d4bbcb154c68285d1189f3374, regionState=OPEN, openSeqNum=2, regionLocation=229bab1f9d30,43217,1733690784865 2024-12-08T20:46:26,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 86b33f0d4bbcb154c68285d1189f3374, server=229bab1f9d30,43217,1733690784865 because future has completed 2024-12-08T20:46:26,401 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41941 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=229bab1f9d30,43217,1733690784865, table=TestHBaseWalOnEC, region=86b33f0d4bbcb154c68285d1189f3374. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-08T20:46:26,407 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T20:46:26,407 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 86b33f0d4bbcb154c68285d1189f3374, server=229bab1f9d30,43217,1733690784865 in 193 msec 2024-12-08T20:46:26,412 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T20:46:26,412 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=86b33f0d4bbcb154c68285d1189f3374, ASSIGN in 358 msec 2024-12-08T20:46:26,413 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T20:46:26,414 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733690786413"}]},"ts":"1733690786413"} 2024-12-08T20:46:26,417 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-08T20:46:26,419 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T20:46:26,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 415 msec 2024-12-08T20:46:26,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T20:46:26,638 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T20:46:26,638 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-08T20:46:26,638 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T20:46:26,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 
2024-12-08T20:46:26,643 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T20:46:26,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-08T20:46:26,649 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374., hostname=229bab1f9d30,43217,1733690784865, seqNum=2] 2024-12-08T20:46:26,650 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T20:46:26,653 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T20:46:26,657 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-08T20:46:26,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-08T20:46:26,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T20:46:26,660 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-08T20:46:26,662 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T20:46:26,662 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T20:46:26,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T20:46:26,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43217 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-08T20:46:26,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 
2024-12-08T20:46:26,818 INFO [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 86b33f0d4bbcb154c68285d1189f3374 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-08T20:46:26,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/default/TestHBaseWalOnEC/86b33f0d4bbcb154c68285d1189f3374/.tmp/cf/f3ffe951a00744638d2778c03366eff0 is 36, key is row/cf:cq/1733690786654/Put/seqid=0 2024-12-08T20:46:26,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741839_1015 (size=4787) 2024-12-08T20:46:26,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741839_1015 (size=4787) 2024-12-08T20:46:26,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741839_1015 (size=4787) 2024-12-08T20:46:26,845 INFO [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/default/TestHBaseWalOnEC/86b33f0d4bbcb154c68285d1189f3374/.tmp/cf/f3ffe951a00744638d2778c03366eff0 2024-12-08T20:46:26,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/default/TestHBaseWalOnEC/86b33f0d4bbcb154c68285d1189f3374/.tmp/cf/f3ffe951a00744638d2778c03366eff0 as hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/default/TestHBaseWalOnEC/86b33f0d4bbcb154c68285d1189f3374/cf/f3ffe951a00744638d2778c03366eff0 2024-12-08T20:46:26,861 INFO [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/default/TestHBaseWalOnEC/86b33f0d4bbcb154c68285d1189f3374/cf/f3ffe951a00744638d2778c03366eff0, entries=1, sequenceid=5, filesize=4.7 K 2024-12-08T20:46:26,863 INFO [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 86b33f0d4bbcb154c68285d1189f3374 in 45ms, sequenceid=5, compaction requested=false 2024-12-08T20:46:26,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 86b33f0d4bbcb154c68285d1189f3374: 2024-12-08T20:46:26,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 
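Editorial note: the flush above persists a single small cell written at row/cf:cq (32 B of data per the flush log) just before the FlushTableProcedure ran. A minimal sketch of the client side of that sequence, a put followed by an admin-initiated flush, using the standard Table/Admin API; the connection object and the literal value are placeholders.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class PutAndFlushSketch {
  // 'connection' is assumed to be the test's cluster connection (e.g. UTIL.getConnection()).
  static void putAndFlush(Connection connection) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    // The test first waits for the table's regions to be assigned (the 60 s wait logged above)
    // before locating the region for row 'row' and writing to it.
    try (Table table = connection.getTable(name);
         Admin admin = connection.getAdmin()) {
      // Matches the cell visible in the flush log: key is row/cf:cq; the value is a placeholder.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the FlushTableProcedure seen above, writing the memstore out as one HFile.
      admin.flush(name);
    }
  }
}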
2024-12-08T20:46:26,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-08T20:46:26,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-08T20:46:26,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T20:46:26,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 203 msec 2024-12-08T20:46:26,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 213 msec 2024-12-08T20:46:26,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41941 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T20:46:26,978 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T20:46:26,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T20:46:26,983 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T20:46:26,984 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T20:46:26,984 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:26,984 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:26,984 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T20:46:26,984 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T20:46:26,984 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1017608209, stopped=false 2024-12-08T20:46:26,985 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=229bab1f9d30,41941,1733690784653 2024-12-08T20:46:27,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:27,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:27,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:27,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:27,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:27,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, 
quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:27,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T20:46:27,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:27,038 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T20:46:27,038 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T20:46:27,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:27,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:27,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:27,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T20:46:27,039 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T20:46:27,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:27,040 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '229bab1f9d30,36579,1733690784797' ***** 2024-12-08T20:46:27,040 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T20:46:27,040 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '229bab1f9d30,33651,1733690784829' ***** 2024-12-08T20:46:27,040 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T20:46:27,040 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '229bab1f9d30,43217,1733690784865' ***** 2024-12-08T20:46:27,040 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T20:46:27,040 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T20:46:27,040 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T20:46:27,040 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T20:46:27,041 INFO [RS:0;229bab1f9d30:36579 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
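Editorial note: the shutdown call stacks above point at TestHBaseWalOnEC.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, after which the master requests cluster shutdown and each region server begins stopping. A minimal sketch of such a teardown hook, assuming a JUnit 4 @After and the same placeholder UTIL field as in the setup sketch earlier; it is not the actual test source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class WalOnEcTearDownSketch {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Matches the call stack above: tearDown() -> HBaseTestingUtil.shutdownMiniCluster(),
    // which closes the shared async connection and then stops the master and region servers.
    UTIL.shutdownMiniCluster();
  }
}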
2024-12-08T20:46:27,041 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T20:46:27,041 INFO [RS:0;229bab1f9d30:36579 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T20:46:27,041 INFO [RS:1;229bab1f9d30:33651 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T20:46:27,041 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(959): stopping server 229bab1f9d30,36579,1733690784797 2024-12-08T20:46:27,041 INFO [RS:1;229bab1f9d30:33651 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T20:46:27,041 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T20:46:27,041 INFO [RS:2;229bab1f9d30:43217 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T20:46:27,041 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(959): stopping server 229bab1f9d30,33651,1733690784829 2024-12-08T20:46:27,041 INFO [RS:0;229bab1f9d30:36579 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T20:46:27,041 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T20:46:27,041 INFO [RS:2;229bab1f9d30:43217 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T20:46:27,041 INFO [RS:1;229bab1f9d30:33651 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T20:46:27,041 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(3091): Received CLOSE for 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:27,041 INFO [RS:0;229bab1f9d30:36579 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;229bab1f9d30:36579. 2024-12-08T20:46:27,041 INFO [RS:1;229bab1f9d30:33651 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;229bab1f9d30:33651. 
2024-12-08T20:46:27,041 DEBUG [RS:0;229bab1f9d30:36579 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T20:46:27,041 DEBUG [RS:1;229bab1f9d30:33651 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T20:46:27,041 DEBUG [RS:0;229bab1f9d30:36579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:27,041 DEBUG [RS:1;229bab1f9d30:33651 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:27,041 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(959): stopping server 229bab1f9d30,43217,1733690784865 2024-12-08T20:46:27,041 INFO [RS:2;229bab1f9d30:43217 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T20:46:27,041 INFO [RS:2;229bab1f9d30:43217 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;229bab1f9d30:43217. 
2024-12-08T20:46:27,041 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(976): stopping server 229bab1f9d30,36579,1733690784797; all regions closed. 2024-12-08T20:46:27,041 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T20:46:27,041 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T20:46:27,041 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 86b33f0d4bbcb154c68285d1189f3374, disabling compactions & flushes 2024-12-08T20:46:27,042 DEBUG [RS:2;229bab1f9d30:43217 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T20:46:27,042 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T20:46:27,042 INFO [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:27,042 DEBUG [RS:2;229bab1f9d30:43217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:27,042 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T20:46:27,042 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:27,042 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T20:46:27,042 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 
after waiting 0 ms 2024-12-08T20:46:27,042 DEBUG [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(1325): Online Regions={86b33f0d4bbcb154c68285d1189f3374=TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374.} 2024-12-08T20:46:27,042 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:27,042 DEBUG [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(1351): Waiting on 86b33f0d4bbcb154c68285d1189f3374 2024-12-08T20:46:27,042 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,042 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,042 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,042 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,042 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,045 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T20:46:27,045 DEBUG [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T20:46:27,045 DEBUG [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T20:46:27,045 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T20:46:27,045 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T20:46:27,045 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T20:46:27,045 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T20:46:27,045 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T20:46:27,045 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-08T20:46:27,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741835_1011 (size=93) 2024-12-08T20:46:27,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741835_1011 (size=93) 2024-12-08T20:46:27,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741835_1011 (size=93) 2024-12-08T20:46:27,050 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/default/TestHBaseWalOnEC/86b33f0d4bbcb154c68285d1189f3374/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-08T20:46:27,051 INFO [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:27,051 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 86b33f0d4bbcb154c68285d1189f3374: Waiting for close lock at 1733690787041Running coprocessor pre-close hooks at 1733690787041Disabling compacts and flushes for region at 1733690787041Disabling writes for close at 1733690787042 (+1 ms)Writing region close event to WAL at 1733690787045 (+3 ms)Running coprocessor post-close hooks at 1733690787051 (+6 ms)Closed at 1733690787051 2024-12-08T20:46:27,051 DEBUG [RS_CLOSE_REGION-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374. 2024-12-08T20:46:27,051 DEBUG [RS:0;229bab1f9d30:36579 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/oldWALs 2024-12-08T20:46:27,051 INFO [RS:0;229bab1f9d30:36579 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 229bab1f9d30%2C36579%2C1733690784797:(num 1733690785563) 2024-12-08T20:46:27,051 DEBUG [RS:0;229bab1f9d30:36579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:27,051 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:27,052 INFO [RS:0;229bab1f9d30:36579 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T20:46:27,052 INFO [RS:0;229bab1f9d30:36579 {}] hbase.ChoreService(370): Chore service for: regionserver/229bab1f9d30:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T20:46:27,052 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T20:46:27,052 INFO [regionserver/229bab1f9d30:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T20:46:27,052 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T20:46:27,052 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T20:46:27,052 INFO [RS:0;229bab1f9d30:36579 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T20:46:27,052 INFO [RS:0;229bab1f9d30:36579 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36579 2024-12-08T20:46:27,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/229bab1f9d30,36579,1733690784797 2024-12-08T20:46:27,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T20:46:27,060 INFO [RS:0;229bab1f9d30:36579 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T20:46:27,061 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/.tmp/info/f951978a47cb402d930c35e76e596ef9 is 153, key is TestHBaseWalOnEC,,1733690786004.86b33f0d4bbcb154c68285d1189f3374./info:regioninfo/1733690786396/Put/seqid=0 2024-12-08T20:46:27,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741840_1016 (size=6637) 2024-12-08T20:46:27,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741840_1016 (size=6637) 2024-12-08T20:46:27,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741840_1016 (size=6637) 2024-12-08T20:46:27,070 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [229bab1f9d30,36579,1733690784797] 2024-12-08T20:46:27,071 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/.tmp/info/f951978a47cb402d930c35e76e596ef9 2024-12-08T20:46:27,077 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/229bab1f9d30,36579,1733690784797 already deleted, retry=false 2024-12-08T20:46:27,077 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 229bab1f9d30,36579,1733690784797 expired; onlineServers=2 2024-12-08T20:46:27,092 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/.tmp/ns/b1fd843128b34389befaa2fc63479b83 is 43, key is default/ns:d/1733690785941/Put/seqid=0 2024-12-08T20:46:27,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741841_1017 (size=5153) 2024-12-08T20:46:27,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741841_1017 (size=5153) 2024-12-08T20:46:27,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34285 is added to blk_1073741841_1017 (size=5153) 2024-12-08T20:46:27,099 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/.tmp/ns/b1fd843128b34389befaa2fc63479b83 2024-12-08T20:46:27,115 INFO [regionserver/229bab1f9d30:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:27,119 INFO [regionserver/229bab1f9d30:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:27,120 INFO [regionserver/229bab1f9d30:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:27,120 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/.tmp/table/51a656b42a2c453099a3caeadfafa1bf is 52, key is TestHBaseWalOnEC/table:state/1733690786413/Put/seqid=0 2024-12-08T20:46:27,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741842_1018 (size=5249) 2024-12-08T20:46:27,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741842_1018 (size=5249) 2024-12-08T20:46:27,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741842_1018 (size=5249) 2024-12-08T20:46:27,127 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/.tmp/table/51a656b42a2c453099a3caeadfafa1bf 2024-12-08T20:46:27,137 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/.tmp/info/f951978a47cb402d930c35e76e596ef9 as hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/info/f951978a47cb402d930c35e76e596ef9 2024-12-08T20:46:27,146 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/info/f951978a47cb402d930c35e76e596ef9, entries=10, sequenceid=11, filesize=6.5 K 2024-12-08T20:46:27,148 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/.tmp/ns/b1fd843128b34389befaa2fc63479b83 as hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/ns/b1fd843128b34389befaa2fc63479b83 2024-12-08T20:46:27,156 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/ns/b1fd843128b34389befaa2fc63479b83, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T20:46:27,158 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/.tmp/table/51a656b42a2c453099a3caeadfafa1bf as hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/table/51a656b42a2c453099a3caeadfafa1bf 2024-12-08T20:46:27,166 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/table/51a656b42a2c453099a3caeadfafa1bf, entries=2, sequenceid=11, filesize=5.1 K 2024-12-08T20:46:27,167 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false 2024-12-08T20:46:27,170 INFO [RS:0;229bab1f9d30:36579 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T20:46:27,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:27,170 INFO [RS:0;229bab1f9d30:36579 {}] regionserver.HRegionServer(1031): Exiting; stopping=229bab1f9d30,36579,1733690784797; zookeeper connection closed. 
2024-12-08T20:46:27,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36579-0x100073bba100001, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:27,171 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@364a8c8f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@364a8c8f 2024-12-08T20:46:27,173 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T20:46:27,173 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T20:46:27,173 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T20:46:27,173 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733690787045Running coprocessor pre-close hooks at 1733690787045Disabling compacts and flushes for region at 1733690787045Disabling writes for close at 1733690787045Obtaining lock to block concurrent updates at 1733690787045Preparing flush snapshotting stores in 1588230740 at 1733690787045Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733690787046 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733690787047 (+1 ms)Flushing 1588230740/info: creating writer at 1733690787047Flushing 1588230740/info: appending metadata at 1733690787061 (+14 ms)Flushing 1588230740/info: closing flushed file at 1733690787061Flushing 1588230740/ns: creating writer at 1733690787078 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733690787092 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733690787092Flushing 1588230740/table: creating writer at 1733690787107 (+15 ms)Flushing 1588230740/table: appending metadata at 1733690787119 (+12 ms)Flushing 1588230740/table: closing flushed file at 1733690787120 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fb4d812: reopening flushed file at 1733690787135 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6492c51c: reopening flushed file at 1733690787147 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@748febfa: reopening flushed file at 1733690787156 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false at 1733690787167 (+11 ms)Writing region close event to WAL at 1733690787168 (+1 ms)Running coprocessor post-close hooks at 1733690787173 (+5 ms)Closed at 1733690787173 2024-12-08T20:46:27,174 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T20:46:27,242 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(976): stopping server 229bab1f9d30,43217,1733690784865; all regions closed. 
2024-12-08T20:46:27,243 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,243 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,243 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,243 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,243 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,245 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(976): stopping server 229bab1f9d30,33651,1733690784829; all regions closed. 2024-12-08T20:46:27,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741834_1010 (size=1298) 2024-12-08T20:46:27,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741834_1010 (size=1298) 2024-12-08T20:46:27,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741834_1010 (size=1298) 2024-12-08T20:46:27,247 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,247 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,247 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,247 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,247 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741836_1012 (size=2751) 2024-12-08T20:46:27,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741836_1012 (size=2751) 2024-12-08T20:46:27,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741836_1012 (size=2751) 2024-12-08T20:46:27,251 DEBUG [RS:2;229bab1f9d30:43217 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/oldWALs 2024-12-08T20:46:27,251 INFO [RS:2;229bab1f9d30:43217 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 229bab1f9d30%2C43217%2C1733690784865:(num 1733690785563) 2024-12-08T20:46:27,251 DEBUG [RS:2;229bab1f9d30:43217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:27,251 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:27,251 INFO [RS:2;229bab1f9d30:43217 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T20:46:27,251 INFO [RS:2;229bab1f9d30:43217 {}] hbase.ChoreService(370): Chore service for: regionserver/229bab1f9d30:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T20:46:27,251 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T20:46:27,251 INFO [regionserver/229bab1f9d30:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T20:46:27,252 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-08T20:46:27,252 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T20:46:27,252 INFO [RS:2;229bab1f9d30:43217 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T20:46:27,252 INFO [RS:2;229bab1f9d30:43217 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43217 2024-12-08T20:46:27,253 DEBUG [RS:1;229bab1f9d30:33651 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/oldWALs 2024-12-08T20:46:27,253 INFO [RS:1;229bab1f9d30:33651 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 229bab1f9d30%2C33651%2C1733690784829.meta:.meta(num 1733690785861) 2024-12-08T20:46:27,253 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,254 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,254 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,254 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,254 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741833_1009 (size=93) 2024-12-08T20:46:27,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741833_1009 (size=93) 2024-12-08T20:46:27,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741833_1009 (size=93) 2024-12-08T20:46:27,260 DEBUG [RS:1;229bab1f9d30:33651 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/oldWALs 2024-12-08T20:46:27,260 INFO [RS:1;229bab1f9d30:33651 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 229bab1f9d30%2C33651%2C1733690784829:(num 1733690785553) 2024-12-08T20:46:27,260 DEBUG [RS:1;229bab1f9d30:33651 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T20:46:27,260 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T20:46:27,260 INFO [RS:1;229bab1f9d30:33651 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T20:46:27,261 INFO [RS:1;229bab1f9d30:33651 {}] hbase.ChoreService(370): Chore service for: regionserver/229bab1f9d30:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T20:46:27,261 INFO [RS:1;229bab1f9d30:33651 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T20:46:27,261 INFO [regionserver/229bab1f9d30:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T20:46:27,261 INFO [RS:1;229bab1f9d30:33651 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33651
2024-12-08T20:46:27,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/229bab1f9d30,43217,1733690784865
2024-12-08T20:46:27,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-08T20:46:27,276 INFO [RS:2;229bab1f9d30:43217 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-08T20:46:27,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/229bab1f9d30,33651,1733690784829
2024-12-08T20:46:27,285 INFO [RS:1;229bab1f9d30:33651 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-08T20:46:27,285 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher.
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$376/0x00007ffaa88f8520@6b436299 rejected from java.util.concurrent.ThreadPoolExecutor@2e41e195[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14]
    at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?]
    at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?]
    at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4]
2024-12-08T20:46:27,294 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [229bab1f9d30,33651,1733690784829]
2024-12-08T20:46:27,310 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/229bab1f9d30,33651,1733690784829 already deleted, retry=false
2024-12-08T20:46:27,310 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 229bab1f9d30,33651,1733690784829 expired; onlineServers=1
2024-12-08T20:46:27,310 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [229bab1f9d30,43217,1733690784865]
2024-12-08T20:46:27,318 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/229bab1f9d30,43217,1733690784865 already deleted, retry=false
2024-12-08T20:46:27,319 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 229bab1f9d30,43217,1733690784865 expired; onlineServers=0
2024-12-08T20:46:27,319 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '229bab1f9d30,41941,1733690784653' *****
2024-12-08T20:46:27,319 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-08T20:46:27,319 INFO [M:0;229bab1f9d30:41941 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-08T20:46:27,319 INFO [M:0;229bab1f9d30:41941 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-08T20:46:27,319 DEBUG [M:0;229bab1f9d30:41941 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-08T20:46:27,319 DEBUG [M:0;229bab1f9d30:41941 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-08T20:46:27,319 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-08T20:46:27,319 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733690785197 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733690785197,5,FailOnTimeoutGroup] 2024-12-08T20:46:27,319 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733690785197 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733690785197,5,FailOnTimeoutGroup] 2024-12-08T20:46:27,319 INFO [M:0;229bab1f9d30:41941 {}] hbase.ChoreService(370): Chore service for: master/229bab1f9d30:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T20:46:27,320 INFO [M:0;229bab1f9d30:41941 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T20:46:27,320 DEBUG [M:0;229bab1f9d30:41941 {}] master.HMaster(1795): Stopping service threads 2024-12-08T20:46:27,320 INFO [M:0;229bab1f9d30:41941 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T20:46:27,320 INFO [M:0;229bab1f9d30:41941 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T20:46:27,320 INFO [M:0;229bab1f9d30:41941 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T20:46:27,320 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T20:46:27,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T20:46:27,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T20:46:27,327 DEBUG [M:0;229bab1f9d30:41941 {}] zookeeper.ZKUtil(347): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T20:46:27,327 WARN [M:0;229bab1f9d30:41941 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T20:46:27,328 INFO [M:0;229bab1f9d30:41941 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/.lastflushedseqids 2024-12-08T20:46:27,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741843_1019 (size=127) 2024-12-08T20:46:27,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741843_1019 (size=127) 2024-12-08T20:46:27,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741843_1019 (size=127) 2024-12-08T20:46:27,335 INFO [M:0;229bab1f9d30:41941 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T20:46:27,336 INFO [M:0;229bab1f9d30:41941 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T20:46:27,336 DEBUG 
[M:0;229bab1f9d30:41941 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T20:46:27,336 INFO [M:0;229bab1f9d30:41941 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:27,336 DEBUG [M:0;229bab1f9d30:41941 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:27,336 DEBUG [M:0;229bab1f9d30:41941 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T20:46:27,336 DEBUG [M:0;229bab1f9d30:41941 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:27,336 INFO [M:0;229bab1f9d30:41941 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-08T20:46:27,351 DEBUG [M:0;229bab1f9d30:41941 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c09f5c1c794e4bffba3e40f48d4654d4 is 82, key is hbase:meta,,1/info:regioninfo/1733690785896/Put/seqid=0 2024-12-08T20:46:27,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741844_1020 (size=5672) 2024-12-08T20:46:27,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741844_1020 (size=5672) 2024-12-08T20:46:27,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741844_1020 (size=5672) 2024-12-08T20:46:27,359 INFO [M:0;229bab1f9d30:41941 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c09f5c1c794e4bffba3e40f48d4654d4 2024-12-08T20:46:27,378 DEBUG [M:0;229bab1f9d30:41941 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/57e122f1e26640a3863e6869ba5137f1 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733690786421/Put/seqid=0 2024-12-08T20:46:27,379 WARN [IPC Server handler 2 on default port 35551 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T20:46:27,379 WARN [IPC Server handler 2 on default port 35551 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T20:46:27,379 WARN [IPC Server handler 2 
on default port 35551 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T20:46:27,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741845_1021 (size=6440) 2024-12-08T20:46:27,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741845_1021 (size=6440) 2024-12-08T20:46:27,384 INFO [M:0;229bab1f9d30:41941 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/57e122f1e26640a3863e6869ba5137f1 2024-12-08T20:46:27,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:27,394 INFO [RS:2;229bab1f9d30:43217 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T20:46:27,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43217-0x100073bba100003, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:27,394 INFO [RS:2;229bab1f9d30:43217 {}] regionserver.HRegionServer(1031): Exiting; stopping=229bab1f9d30,43217,1733690784865; zookeeper connection closed. 2024-12-08T20:46:27,394 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2692ad74 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2692ad74 2024-12-08T20:46:27,402 INFO [RS:1;229bab1f9d30:33651 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T20:46:27,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:27,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33651-0x100073bba100002, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:27,402 INFO [RS:1;229bab1f9d30:33651 {}] regionserver.HRegionServer(1031): Exiting; stopping=229bab1f9d30,33651,1733690784829; zookeeper connection closed. 
2024-12-08T20:46:27,402 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2914d140 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2914d140 2024-12-08T20:46:27,403 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-08T20:46:27,408 DEBUG [M:0;229bab1f9d30:41941 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fd97bc0f7049451aa60af23be6d15f92 is 69, key is 229bab1f9d30,33651,1733690784829/rs:state/1733690785360/Put/seqid=0 2024-12-08T20:46:27,409 WARN [IPC Server handler 3 on default port 35551 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T20:46:27,409 WARN [IPC Server handler 3 on default port 35551 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T20:46:27,409 WARN [IPC Server handler 3 on default port 35551 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T20:46:27,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741846_1022 (size=5294) 2024-12-08T20:46:27,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741846_1022 (size=5294) 2024-12-08T20:46:27,414 INFO [M:0;229bab1f9d30:41941 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fd97bc0f7049451aa60af23be6d15f92 2024-12-08T20:46:27,419 DEBUG [M:0;229bab1f9d30:41941 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c09f5c1c794e4bffba3e40f48d4654d4 as hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c09f5c1c794e4bffba3e40f48d4654d4 2024-12-08T20:46:27,425 INFO [M:0;229bab1f9d30:41941 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c09f5c1c794e4bffba3e40f48d4654d4, entries=8, sequenceid=72, filesize=5.5 K 2024-12-08T20:46:27,427 DEBUG [M:0;229bab1f9d30:41941 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/57e122f1e26640a3863e6869ba5137f1 as hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/57e122f1e26640a3863e6869ba5137f1 2024-12-08T20:46:27,433 INFO [M:0;229bab1f9d30:41941 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/57e122f1e26640a3863e6869ba5137f1, entries=8, sequenceid=72, filesize=6.3 K 2024-12-08T20:46:27,434 DEBUG [M:0;229bab1f9d30:41941 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fd97bc0f7049451aa60af23be6d15f92 as hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fd97bc0f7049451aa60af23be6d15f92 2024-12-08T20:46:27,440 INFO [M:0;229bab1f9d30:41941 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35551/user/jenkins/test-data/06b016e4-daea-abc3-8c61-630a69c5e5d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fd97bc0f7049451aa60af23be6d15f92, entries=3, sequenceid=72, filesize=5.2 K 2024-12-08T20:46:27,441 INFO [M:0;229bab1f9d30:41941 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 105ms, sequenceid=72, compaction requested=false 2024-12-08T20:46:27,442 INFO [M:0;229bab1f9d30:41941 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T20:46:27,443 DEBUG [M:0;229bab1f9d30:41941 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733690787336Disabling compacts and flushes for region at 1733690787336Disabling writes for close at 1733690787336Obtaining lock to block concurrent updates at 1733690787336Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733690787336Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733690787336Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733690787337 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733690787337Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733690787351 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733690787351Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733690787364 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733690787378 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733690787378Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733690787389 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733690787407 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733690787407Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5717214c: reopening flushed file at 1733690787418 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a53bb86: reopening flushed file at 1733690787426 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a4b8338: reopening flushed file at 1733690787433 (+7 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 105ms, sequenceid=72, compaction requested=false at 1733690787441 (+8 ms)Writing region close event to WAL at 1733690787442 (+1 ms)Closed at 1733690787442 2024-12-08T20:46:27,443 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,443 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,443 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,443 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,443 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T20:46:27,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34285 is added to blk_1073741830_1006 (size=32683) 2024-12-08T20:46:27,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43581 is added to blk_1073741830_1006 (size=32683) 2024-12-08T20:46:27,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38287 is added to blk_1073741830_1006 (size=32683) 2024-12-08T20:46:27,447 INFO [M:0;229bab1f9d30:41941 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T20:46:27,447 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T20:46:27,447 INFO [M:0;229bab1f9d30:41941 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41941 2024-12-08T20:46:27,447 INFO [M:0;229bab1f9d30:41941 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T20:46:27,578 INFO [M:0;229bab1f9d30:41941 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T20:46:27,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:27,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41941-0x100073bba100000, quorum=127.0.0.1:57990, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T20:46:27,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1dd777e0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:27,584 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@d8758e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T20:46:27,584 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T20:46:27,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6861a8ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T20:46:27,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19b3fbfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.log.dir/,STOPPED} 2024-12-08T20:46:27,586 WARN [BP-1421637431-172.17.0.2-1733690782608 heartbeating to localhost/127.0.0.1:35551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T20:46:27,586 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T20:46:27,586 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T20:46:27,586 WARN [BP-1421637431-172.17.0.2-1733690782608 heartbeating to localhost/127.0.0.1:35551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1421637431-172.17.0.2-1733690782608 (Datanode Uuid 75be7945-4c10-4158-b655-f9c4c6b12e00) service to localhost/127.0.0.1:35551 2024-12-08T20:46:27,587 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data5/current/BP-1421637431-172.17.0.2-1733690782608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:27,588 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data6/current/BP-1421637431-172.17.0.2-1733690782608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:27,588 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T20:46:27,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@738646c4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:27,591 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39163242{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T20:46:27,592 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T20:46:27,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16a06885{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T20:46:27,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63ea337e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.log.dir/,STOPPED} 2024-12-08T20:46:27,593 WARN [BP-1421637431-172.17.0.2-1733690782608 heartbeating to localhost/127.0.0.1:35551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T20:46:27,593 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T20:46:27,593 WARN [BP-1421637431-172.17.0.2-1733690782608 heartbeating to localhost/127.0.0.1:35551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1421637431-172.17.0.2-1733690782608 (Datanode Uuid 965387b1-49ff-4198-8910-d17ab35a5c54) service to localhost/127.0.0.1:35551 2024-12-08T20:46:27,593 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T20:46:27,594 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data3/current/BP-1421637431-172.17.0.2-1733690782608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:27,594 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data4/current/BP-1421637431-172.17.0.2-1733690782608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:27,594 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T20:46:27,596 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ec581d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T20:46:27,596 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b495d6d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T20:46:27,596 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T20:46:27,596 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e639fe5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T20:46:27,597 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@427407e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.log.dir/,STOPPED} 2024-12-08T20:46:27,598 WARN [BP-1421637431-172.17.0.2-1733690782608 heartbeating to localhost/127.0.0.1:35551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T20:46:27,598 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T20:46:27,598 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T20:46:27,598 WARN [BP-1421637431-172.17.0.2-1733690782608 heartbeating to localhost/127.0.0.1:35551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1421637431-172.17.0.2-1733690782608 (Datanode Uuid 87a2727d-b3a3-4ecf-a9e8-2ee106afb1eb) service to localhost/127.0.0.1:35551 2024-12-08T20:46:27,598 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data1/current/BP-1421637431-172.17.0.2-1733690782608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:27,599 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/cluster_fd27403a-8c55-94b7-a01b-e3cd414232a2/data/data2/current/BP-1421637431-172.17.0.2-1733690782608 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T20:46:27,599 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T20:46:27,604 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60edc840{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T20:46:27,605 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@362f3414{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T20:46:27,605 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T20:46:27,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14abb266{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T20:46:27,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fbe2fc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b580dfbb-6198-ecca-0362-8ba1e077f26c/hadoop.log.dir/,STOPPED} 2024-12-08T20:46:27,613 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T20:46:27,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T20:46:27,643 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=150 (was 90) - Thread LEAK? -, OpenFileDescriptor=518 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=188 (was 186) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=16732 (was 16927)