2024-12-03 08:06:10,932 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-03 08:06:10,945 main DEBUG Took 0.011668 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-03 08:06:10,946 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-03 08:06:10,946 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-03 08:06:10,947 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-03 08:06:10,949 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,959 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-03 08:06:10,980 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,982 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,983 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,983 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,984 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,984 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,986 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,986 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,987 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,987 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,988 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,989 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,989 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,990 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,990 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,991 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,991 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,992 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,992 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,993 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,994 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,994 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 08:06:10,995 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,996 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-03 08:06:10,997 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 08:06:10,998 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-03 08:06:11,000 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-03 08:06:11,001 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-03 08:06:11,002 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-03 08:06:11,003 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-03 08:06:11,013 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-03 08:06:11,016 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-03 08:06:11,017 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-03 08:06:11,018 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-03 08:06:11,018 main DEBUG createAppenders(={Console})
2024-12-03 08:06:11,019 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-12-03 08:06:11,020 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-03 08:06:11,020 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-12-03 08:06:11,021 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-03 08:06:11,021 main DEBUG OutputStream closed
2024-12-03 08:06:11,022 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-03 08:06:11,022 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-03 08:06:11,022 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-12-03 08:06:11,102 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-03 08:06:11,104 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-03 08:06:11,105 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-03 08:06:11,106 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-03 08:06:11,107 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-03 08:06:11,107 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-03 08:06:11,108 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-03 08:06:11,108 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-03 08:06:11,109 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-03 08:06:11,109 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-03 08:06:11,110 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-03 08:06:11,110 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-03 08:06:11,111 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-03 08:06:11,111 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-03 08:06:11,111 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-03 08:06:11,112 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-03 08:06:11,112 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-03 08:06:11,113 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-03 08:06:11,115 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-03 08:06:11,115 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-12-03 08:06:11,115 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-03 08:06:11,116 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-12-03T08:06:11,133 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-12-03 08:06:11,135 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-03 08:06:11,136 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-03T08:06:11,375 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961
2024-12-03T08:06:11,402 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521, deleteOnExit=true
2024-12-03T08:06:11,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/test.cache.data in system properties and HBase conf
2024-12-03T08:06:11,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.tmp.dir in system properties and HBase conf
2024-12-03T08:06:11,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir in system properties and HBase conf
2024-12-03T08:06:11,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-03T08:06:11,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-03T08:06:11,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-03T08:06:11,495 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-03T08:06:11,580 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-03T08:06:11,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-03T08:06:11,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-03T08:06:11,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-03T08:06:11,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T08:06:11,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-03T08:06:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-03T08:06:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T08:06:11,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T08:06:11,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-03T08:06:11,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/nfs.dump.dir in system properties and HBase conf
2024-12-03T08:06:11,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/java.io.tmpdir in system properties and HBase conf
2024-12-03T08:06:11,588 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T08:06:11,588 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-03T08:06:11,589 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-03T08:06:12,372 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-03T08:06:12,447 INFO [Time-limited test {}] log.Log(170): Logging initialized @2216ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-03T08:06:12,521 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T08:06:12,582 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T08:06:12,602 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T08:06:12,602 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T08:06:12,603 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T08:06:12,617 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T08:06:12,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir/,AVAILABLE}
2024-12-03T08:06:12,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T08:06:12,834 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/java.io.tmpdir/jetty-localhost-42379-hadoop-hdfs-3_4_1-tests_jar-_-any-14973557346127608061/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T08:06:12,841 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:42379}
2024-12-03T08:06:12,842 INFO [Time-limited test {}] server.Server(415): Started @2612ms
2024-12-03T08:06:13,223 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T08:06:13,231 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T08:06:13,232 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T08:06:13,232 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T08:06:13,232 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T08:06:13,233 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir/,AVAILABLE}
2024-12-03T08:06:13,234 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T08:06:13,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/java.io.tmpdir/jetty-localhost-41065-hadoop-hdfs-3_4_1-tests_jar-_-any-10348317354189476832/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T08:06:13,357 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:41065}
2024-12-03T08:06:13,358 INFO [Time-limited test {}] server.Server(415): Started @3128ms
2024-12-03T08:06:13,413 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T08:06:13,532 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T08:06:13,539 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T08:06:13,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T08:06:13,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T08:06:13,543 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T08:06:13,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir/,AVAILABLE}
2024-12-03T08:06:13,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T08:06:13,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/java.io.tmpdir/jetty-localhost-33305-hadoop-hdfs-3_4_1-tests_jar-_-any-7042752457641074030/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T08:06:13,698 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:33305}
2024-12-03T08:06:13,698 INFO [Time-limited test {}] server.Server(415): Started @3469ms
2024-12-03T08:06:13,700 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T08:06:13,735 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T08:06:13,740 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T08:06:13,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T08:06:13,742 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T08:06:13,742 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T08:06:13,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir/,AVAILABLE}
2024-12-03T08:06:13,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T08:06:13,846 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data4/current/BP-1083000352-172.17.0.2-1733213172129/current, will proceed with Du for space computation calculation,
2024-12-03T08:06:13,846 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data1/current/BP-1083000352-172.17.0.2-1733213172129/current, will proceed with Du for space computation calculation,
2024-12-03T08:06:13,846 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data2/current/BP-1083000352-172.17.0.2-1733213172129/current, will proceed with Du for space computation calculation,
2024-12-03T08:06:13,846 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data3/current/BP-1083000352-172.17.0.2-1733213172129/current, will proceed with Du for space computation calculation,
2024-12-03T08:06:13,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/java.io.tmpdir/jetty-localhost-43225-hadoop-hdfs-3_4_1-tests_jar-_-any-8407509668052136509/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T08:06:13,889 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T08:06:13,890 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T08:06:13,890 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:43225}
2024-12-03T08:06:13,891 INFO [Time-limited test {}] server.Server(415): Started @3661ms
2024-12-03T08:06:13,894 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T08:06:13,958 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16e8e7ada0b56048 with lease ID 0x4e1d0c9e11335a84: Processing first storage report for DS-7448b040-3754-49ef-b37f-8ea71d12640e from datanode DatanodeRegistration(127.0.0.1:39291, datanodeUuid=21ef23be-79ca-4047-afc8-056a91c35486, infoPort=39841, infoSecurePort=0, ipcPort=39223, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129)
2024-12-03T08:06:13,959 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16e8e7ada0b56048 with lease ID 0x4e1d0c9e11335a84: from storage DS-7448b040-3754-49ef-b37f-8ea71d12640e node DatanodeRegistration(127.0.0.1:39291, datanodeUuid=21ef23be-79ca-4047-afc8-056a91c35486, infoPort=39841, infoSecurePort=0, ipcPort=39223, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-03T08:06:13,959 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7f35fd6491f4835 with lease ID 0x4e1d0c9e11335a85: Processing first storage report for DS-936e5da1-52d2-4398-ae0a-1ec94da604f3 from datanode DatanodeRegistration(127.0.0.1:39715, datanodeUuid=1185b3ea-d0c6-4ebc-9f85-37b3c68f4cfc, infoPort=44807, infoSecurePort=0, ipcPort=40847, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129)
2024-12-03T08:06:13,960 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7f35fd6491f4835 with lease ID 0x4e1d0c9e11335a85: from storage DS-936e5da1-52d2-4398-ae0a-1ec94da604f3 node DatanodeRegistration(127.0.0.1:39715, datanodeUuid=1185b3ea-d0c6-4ebc-9f85-37b3c68f4cfc, infoPort=44807, infoSecurePort=0, ipcPort=40847, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T08:06:13,960 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16e8e7ada0b56048 with lease ID 0x4e1d0c9e11335a84: Processing first storage report for DS-fd49d599-436b-48e2-9225-c8ce4976de0e from datanode DatanodeRegistration(127.0.0.1:39291, datanodeUuid=21ef23be-79ca-4047-afc8-056a91c35486, infoPort=39841, infoSecurePort=0, ipcPort=39223, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129)
2024-12-03T08:06:13,960 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16e8e7ada0b56048 with lease ID 0x4e1d0c9e11335a84: from storage DS-fd49d599-436b-48e2-9225-c8ce4976de0e node DatanodeRegistration(127.0.0.1:39291, datanodeUuid=21ef23be-79ca-4047-afc8-056a91c35486, infoPort=39841, infoSecurePort=0, ipcPort=39223, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-03T08:06:13,960 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7f35fd6491f4835 with lease ID 0x4e1d0c9e11335a85: Processing first storage report for DS-4e348343-4cee-4084-8dd6-2fdc8315d36d from datanode DatanodeRegistration(127.0.0.1:39715, datanodeUuid=1185b3ea-d0c6-4ebc-9f85-37b3c68f4cfc, infoPort=44807, infoSecurePort=0, ipcPort=40847, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129)
2024-12-03T08:06:13,960 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7f35fd6491f4835 with lease ID 0x4e1d0c9e11335a85: from storage DS-4e348343-4cee-4084-8dd6-2fdc8315d36d node DatanodeRegistration(127.0.0.1:39715, datanodeUuid=1185b3ea-d0c6-4ebc-9f85-37b3c68f4cfc, infoPort=44807, infoSecurePort=0, ipcPort=40847, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T08:06:13,996 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data6/current/BP-1083000352-172.17.0.2-1733213172129/current, will proceed with Du for space computation calculation,
2024-12-03T08:06:13,996 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data5/current/BP-1083000352-172.17.0.2-1733213172129/current, will proceed with Du for space computation calculation,
2024-12-03T08:06:14,022 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T08:06:14,028 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa00f5be5d1982147 with lease ID 0x4e1d0c9e11335a86: Processing first storage report for DS-6fd1f05f-2887-4568-aa99-464de1dfeefa from datanode DatanodeRegistration(127.0.0.1:43251, datanodeUuid=5c76adae-747e-4c0c-a478-02fbf3eb27c1, infoPort=35865, infoSecurePort=0, ipcPort=35597, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129)
2024-12-03T08:06:14,028 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa00f5be5d1982147 with lease ID 0x4e1d0c9e11335a86: from storage DS-6fd1f05f-2887-4568-aa99-464de1dfeefa node DatanodeRegistration(127.0.0.1:43251, datanodeUuid=5c76adae-747e-4c0c-a478-02fbf3eb27c1, infoPort=35865, infoSecurePort=0, ipcPort=35597, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-03T08:06:14,028 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa00f5be5d1982147 with lease ID 0x4e1d0c9e11335a86: Processing first storage report for DS-6cfccff8-8cf7-47a1-8197-fe5e1d7abe20 from datanode DatanodeRegistration(127.0.0.1:43251, datanodeUuid=5c76adae-747e-4c0c-a478-02fbf3eb27c1, infoPort=35865, infoSecurePort=0, ipcPort=35597, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129)
2024-12-03T08:06:14,028 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa00f5be5d1982147 with lease ID 0x4e1d0c9e11335a86: from storage DS-6cfccff8-8cf7-47a1-8197-fe5e1d7abe20 node DatanodeRegistration(127.0.0.1:43251, datanodeUuid=5c76adae-747e-4c0c-a478-02fbf3eb27c1, infoPort=35865, infoSecurePort=0, ipcPort=35597, storageInfo=lv=-57;cid=testClusterID;nsid=344125561;c=1733213172129), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T08:06:14,236 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961
2024-12-03T08:06:14,309 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-12-03T08:06:14,363 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=161, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=40, ProcessCount=11, AvailableMemoryMB=8851
2024-12-03T08:06:14,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-03T08:06:14,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-03T08:06:14,456 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/zookeeper_0, clientPort=57724, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-03T08:06:14,467 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57724
2024-12-03T08:06:14,482 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:14,485 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:14,596 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:14,596 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:14,644 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:35874 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:39291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35874 dst: /127.0.0.1:39291
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T08:06:14,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-03T08:06:15,063 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-03T08:06:15,072 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc with version=8
2024-12-03T08:06:15,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/hbase-staging
2024-12-03T08:06:15,175 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-03T08:06:15,422 INFO [Time-limited test {}] client.ConnectionUtils(128): master/911db94732f6:0 server-side Connection retries=45
2024-12-03T08:06:15,432 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:15,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:15,437 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T08:06:15,438 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:15,438 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T08:06:15,574 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-03T08:06:15,636 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-03T08:06:15,648 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-03T08:06:15,653 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T08:06:15,680 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 475 (auto-detected)
2024-12-03T08:06:15,681 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-03T08:06:15,700 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46089
2024-12-03T08:06:15,721 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46089 connecting to ZooKeeper ensemble=127.0.0.1:57724
2024-12-03T08:06:15,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:460890x0, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T08:06:15,752 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46089-0x10152212f4e0000 connected
2024-12-03T08:06:15,779 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:15,782 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:15,792 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T08:06:15,797 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc, hbase.cluster.distributed=false
2024-12-03T08:06:15,824 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T08:06:15,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46089
2024-12-03T08:06:15,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46089
2024-12-03T08:06:15,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46089
2024-12-03T08:06:15,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46089
2024-12-03T08:06:15,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46089
2024-12-03T08:06:15,944 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/911db94732f6:0 server-side Connection retries=45
2024-12-03T08:06:15,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:15,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:15,947 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T08:06:15,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:15,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T08:06:15,950 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T08:06:15,953 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T08:06:15,954 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38655
2024-12-03T08:06:15,955 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38655 connecting to ZooKeeper ensemble=127.0.0.1:57724
2024-12-03T08:06:15,957 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:15,961 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:15,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:386550x0, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T08:06:15,969 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38655-0x10152212f4e0001 connected
2024-12-03T08:06:15,969 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T08:06:15,974 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-03T08:06:15,982 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T08:06:15,985 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T08:06:15,991 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T08:06:15,992 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38655
2024-12-03T08:06:15,993 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38655
2024-12-03T08:06:15,993 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38655
2024-12-03T08:06:15,994 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38655
2024-12-03T08:06:15,994 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38655
2024-12-03T08:06:16,012 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/911db94732f6:0 server-side Connection retries=45
2024-12-03T08:06:16,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:16,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:16,012 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T08:06:16,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:16,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T08:06:16,013 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T08:06:16,013 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T08:06:16,014 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39531
2024-12-03T08:06:16,015 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39531 connecting to ZooKeeper ensemble=127.0.0.1:57724
2024-12-03T08:06:16,016 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:16,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:16,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395310x0, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T08:06:16,027 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39531-0x10152212f4e0002 connected
2024-12-03T08:06:16,027 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T08:06:16,027 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-03T08:06:16,028 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T08:06:16,030 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T08:06:16,032 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T08:06:16,033 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39531
2024-12-03T08:06:16,033 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39531
2024-12-03T08:06:16,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39531
2024-12-03T08:06:16,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39531
2024-12-03T08:06:16,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39531
2024-12-03T08:06:16,055 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/911db94732f6:0 server-side Connection retries=45
2024-12-03T08:06:16,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:16,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:16,055 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T08:06:16,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T08:06:16,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T08:06:16,056 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T08:06:16,056 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T08:06:16,057 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34321
2024-12-03T08:06:16,059 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34321 connecting to ZooKeeper ensemble=127.0.0.1:57724
2024-12-03T08:06:16,060 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:16,063 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T08:06:16,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343210x0, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T08:06:16,068 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34321-0x10152212f4e0003 connected
2024-12-03T08:06:16,068 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T08:06:16,069 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-03T08:06:16,070 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T08:06:16,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T08:06:16,073 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T08:06:16,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34321
2024-12-03T08:06:16,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34321
2024-12-03T08:06:16,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34321
2024-12-03T08:06:16,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34321
2024-12-03T08:06:16,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34321
2024-12-03T08:06:16,091 DEBUG [M:0;911db94732f6:46089 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;911db94732f6:46089
2024-12-03T08:06:16,092 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/911db94732f6,46089,1733213175227
2024-12-03T08:06:16,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T08:06:16,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T08:06:16,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T08:06:16,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T08:06:16,101 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/911db94732f6,46089,1733213175227
2024-12-03T08:06:16,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T08:06:16,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T08:06:16,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T08:06:16,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T08:06:16,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T08:06:16,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T08:06:16,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T08:06:16,122 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T08:06:16,123 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/911db94732f6,46089,1733213175227 from backup master directory
2024-12-03T08:06:16,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/911db94732f6,46089,1733213175227
2024-12-03T08:06:16,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T08:06:16,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T08:06:16,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:16,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:16,127 WARN [master/911db94732f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T08:06:16,127 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=911db94732f6,46089,1733213175227 2024-12-03T08:06:16,129 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-03T08:06:16,131 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-03T08:06:16,192 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/hbase.id] with ID: a0fb6aaf-dc81-445c-98c4-aa387343d58e 2024-12-03T08:06:16,192 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/.tmp/hbase.id 2024-12-03T08:06:16,199 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,199 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,202 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:35898 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:39291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35898 dst: /127.0.0.1:39291 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T08:06:16,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-03T08:06:16,210 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T08:06:16,210 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/.tmp/hbase.id]:[hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/hbase.id] 2024-12-03T08:06:16,251 INFO [master/911db94732f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:16,256 INFO [master/911db94732f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T08:06:16,275 INFO [master/911db94732f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-12-03T08:06:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,290 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,290 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,293 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:56892 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:39715:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56892 dst: /127.0.0.1:39715 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T08:06:16,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-03T08:06:16,299 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-03T08:06:16,314 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T08:06:16,316 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T08:06:16,322 INFO [master/911db94732f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T08:06:16,350 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,351 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,354 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:35920 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:39291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35920 dst: /127.0.0.1:39291 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T08:06:16,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-03T08:06:16,361 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T08:06:16,379 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store 2024-12-03T08:06:16,394 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,394 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,397 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:35940 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35940 dst: /127.0.0.1:39291 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T08:06:16,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-03T08:06:16,402 WARN [master/911db94732f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T08:06:16,407 INFO [master/911db94732f6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-03T08:06:16,409 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:16,410 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T08:06:16,410 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:16,411 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:16,412 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-03T08:06:16,412 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:16,412 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:16,414 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733213176410Disabling compacts and flushes for region at 1733213176410Disabling writes for close at 1733213176412 (+2 ms)Writing region close event to WAL at 1733213176412Closed at 1733213176412 2024-12-03T08:06:16,415 WARN [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/.initializing 2024-12-03T08:06:16,416 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/WALs/911db94732f6,46089,1733213175227 2024-12-03T08:06:16,424 INFO [master/911db94732f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T08:06:16,437 INFO [master/911db94732f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C46089%2C1733213175227, suffix=, logDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/WALs/911db94732f6,46089,1733213175227, archiveDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/oldWALs, maxLogs=10 2024-12-03T08:06:16,467 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/WALs/911db94732f6,46089,1733213175227/911db94732f6%2C46089%2C1733213175227.1733213176441, exclude list is [], retry=0 2024-12-03T08:06:16,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T08:06:16,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39291,DS-7448b040-3754-49ef-b37f-8ea71d12640e,DISK] 2024-12-03T08:06:16,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43251,DS-6fd1f05f-2887-4568-aa99-464de1dfeefa,DISK] 2024-12-03T08:06:16,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39715,DS-936e5da1-52d2-4398-ae0a-1ec94da604f3,DISK] 2024-12-03T08:06:16,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-03T08:06:16,533 INFO [master/911db94732f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/WALs/911db94732f6,46089,1733213175227/911db94732f6%2C46089%2C1733213175227.1733213176441 2024-12-03T08:06:16,534 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44807:44807),(127.0.0.1/127.0.0.1:39841:39841),(127.0.0.1/127.0.0.1:35865:35865)] 2024-12-03T08:06:16,534 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T08:06:16,535 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:16,538 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,539 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T08:06:16,604 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:16,607 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:16,607 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T08:06:16,610 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:16,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T08:06:16,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T08:06:16,613 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:16,614 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T08:06:16,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T08:06:16,617 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:16,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T08:06:16,618 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,621 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,622 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,627 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,628 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,631 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T08:06:16,634 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:16,640 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T08:06:16,641 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66845064, jitterRate=-0.003930926322937012}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T08:06:16,646 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733213176551Initializing all the Stores at 1733213176553 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213176553Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213176554 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213176554Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213176554Cleaning up temporary data from old regions at 1733213176628 (+74 ms)Region opened successfully at 1733213176646 (+18 ms) 2024-12-03T08:06:16,647 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T08:06:16,679 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3de8403d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=911db94732f6/172.17.0.2:0 2024-12-03T08:06:16,711 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T08:06:16,722 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T08:06:16,722 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T08:06:16,725 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T08:06:16,726 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-03T08:06:16,731 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-03T08:06:16,731 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T08:06:16,756 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T08:06:16,764 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T08:06:16,766 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T08:06:16,768 INFO [master/911db94732f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T08:06:16,769 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T08:06:16,771 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T08:06:16,773 INFO [master/911db94732f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T08:06:16,776 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T08:06:16,777 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T08:06:16,778 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T08:06:16,780 DEBUG [master/911db94732f6:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T08:06:16,797 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T08:06:16,798 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T08:06:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,805 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=911db94732f6,46089,1733213175227, sessionid=0x10152212f4e0000, setting cluster-up flag (Was=false) 2024-12-03T08:06:16,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-03T08:06:16,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,822 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T08:06:16,823 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=911db94732f6,46089,1733213175227 2024-12-03T08:06:16,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:16,834 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T08:06:16,836 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=911db94732f6,46089,1733213175227 2024-12-03T08:06:16,841 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T08:06:16,880 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(746): ClusterId : a0fb6aaf-dc81-445c-98c4-aa387343d58e 2024-12-03T08:06:16,880 INFO [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(746): ClusterId : a0fb6aaf-dc81-445c-98c4-aa387343d58e 2024-12-03T08:06:16,880 INFO [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(746): ClusterId : a0fb6aaf-dc81-445c-98c4-aa387343d58e 2024-12-03T08:06:16,883 DEBUG [RS:2;911db94732f6:34321 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T08:06:16,883 DEBUG [RS:1;911db94732f6:39531 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T08:06:16,883 DEBUG [RS:0;911db94732f6:38655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T08:06:16,888 DEBUG [RS:2;911db94732f6:34321 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T08:06:16,888 DEBUG [RS:1;911db94732f6:39531 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-12-03T08:06:16,888 DEBUG [RS:0;911db94732f6:38655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T08:06:16,888 DEBUG [RS:2;911db94732f6:34321 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T08:06:16,888 DEBUG [RS:0;911db94732f6:38655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T08:06:16,888 DEBUG [RS:1;911db94732f6:39531 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T08:06:16,892 DEBUG [RS:2;911db94732f6:34321 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T08:06:16,892 DEBUG [RS:1;911db94732f6:39531 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T08:06:16,892 DEBUG [RS:0;911db94732f6:38655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T08:06:16,893 DEBUG [RS:1;911db94732f6:39531 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ead3451, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=911db94732f6/172.17.0.2:0 2024-12-03T08:06:16,893 DEBUG [RS:2;911db94732f6:34321 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27171fa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=911db94732f6/172.17.0.2:0 2024-12-03T08:06:16,893 DEBUG [RS:0;911db94732f6:38655 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23087d38, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=911db94732f6/172.17.0.2:0 2024-12-03T08:06:16,911 DEBUG [RS:1;911db94732f6:39531 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;911db94732f6:39531 2024-12-03T08:06:16,914 DEBUG [RS:0;911db94732f6:38655 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;911db94732f6:38655 2024-12-03T08:06:16,916 INFO [RS:0;911db94732f6:38655 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T08:06:16,916 INFO [RS:0;911db94732f6:38655 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T08:06:16,916 DEBUG [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T08:06:16,916 INFO [RS:1;911db94732f6:39531 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T08:06:16,916 INFO [RS:1;911db94732f6:39531 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T08:06:16,916 DEBUG [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(832): About to register with Master. 
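Each region server above logs "Installed shutdown hook thread: Shutdownhook:RS:n;...". The sketch below shows the plain-JVM mechanism behind such a hook; the thread name and cleanup body are invented for illustration and this is not HBase's actual ShutdownHook class, which additionally coordinates with the HDFS client's own hook.

```java
// Illustrative only: the JVM mechanism behind "Installed shutdown hook thread: ...".
// Thread name and cleanup body are invented; HBase's ShutdownHook does more work
// (it coordinates with the HDFS client hook so the filesystem outlives the server).
public class ShutdownHookSketch {
  public static void main(String[] args) throws InterruptedException {
    Thread hook = new Thread(() -> {
      // Runs when the JVM receives SIGTERM/SIGINT or exits normally.
      System.out.println("Shutdownhook: flushing and closing resources before exit");
    }, "Shutdownhook:RS:0;example");
    Runtime.getRuntime().addShutdownHook(hook);

    System.out.println("Installed shutdown hook thread: " + hook.getName());
    Thread.sleep(1_000);   // stand-in for the server's normal run loop
  }
}
```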
2024-12-03T08:06:16,919 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(2659): reportForDuty to master=911db94732f6,46089,1733213175227 with port=38655, startcode=1733213175905 2024-12-03T08:06:16,920 DEBUG [RS:2;911db94732f6:34321 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;911db94732f6:34321 2024-12-03T08:06:16,920 INFO [RS:2;911db94732f6:34321 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T08:06:16,920 INFO [RS:2;911db94732f6:34321 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T08:06:16,920 DEBUG [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T08:06:16,920 INFO [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(2659): reportForDuty to master=911db94732f6,46089,1733213175227 with port=39531, startcode=1733213176011 2024-12-03T08:06:16,921 INFO [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(2659): reportForDuty to master=911db94732f6,46089,1733213175227 with port=34321, startcode=1733213176054 2024-12-03T08:06:16,922 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T08:06:16,933 DEBUG [RS:2;911db94732f6:34321 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T08:06:16,933 DEBUG [RS:0;911db94732f6:38655 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T08:06:16,933 DEBUG [RS:1;911db94732f6:39531 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T08:06:16,933 INFO [master/911db94732f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T08:06:16,941 INFO [master/911db94732f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
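BaseLoadBalancer and StochasticLoadBalancer above report slop=0.2, maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000 and the list of cost functions. A sketch of overriding those values programmatically follows; only the parameter names appear in the log, so the hbase.master.balancer.stochastic.* and hbase.regions.slop keys used here are assumptions to be checked against hbase-default.xml for the release in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of tuning the stochastic balancer parameters reported in the log.
// The key names below are assumed from the logged parameter names; verify
// them against hbase-default.xml before relying on them.
public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    // "slop=0.2" comes from BaseLoadBalancer; same caveat about the exact key.
    conf.setFloat("hbase.regions.slop", 0.2f);
    return conf;
  }
}
```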
2024-12-03T08:06:16,947 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 911db94732f6,46089,1733213175227 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T08:06:16,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-03T08:06:16,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43251 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-03T08:06:16,956 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/911db94732f6:0, corePoolSize=5, maxPoolSize=5 2024-12-03T08:06:16,956 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/911db94732f6:0, corePoolSize=5, maxPoolSize=5 2024-12-03T08:06:16,957 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/911db94732f6:0, corePoolSize=5, maxPoolSize=5 2024-12-03T08:06:16,957 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/911db94732f6:0, corePoolSize=5, maxPoolSize=5 2024-12-03T08:06:16,957 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/911db94732f6:0, corePoolSize=10, maxPoolSize=10 2024-12-03T08:06:16,957 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:16,957 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/911db94732f6:0, corePoolSize=2, maxPoolSize=2 2024-12-03T08:06:16,957 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:16,967 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T08:06:16,968 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T08:06:16,968 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733213206968 2024-12-03T08:06:16,970 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T08:06:16,972 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T08:06:16,974 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42941, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T08:06:16,974 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50507, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T08:06:16,974 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40837, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T08:06:16,976 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:16,976 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T08:06:16,977 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T08:06:16,977 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T08:06:16,978 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T08:06:16,978 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T08:06:16,980 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46089 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T08:06:16,979 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:16,982 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T08:06:16,983 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T08:06:16,983 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T08:06:16,986 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46089 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T08:06:16,987 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46089 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T08:06:16,990 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T08:06:16,990 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T08:06:16,993 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/911db94732f6:0:becomeActiveMaster-HFileCleaner.large.0-1733213176991,5,FailOnTimeoutGroup] 2024-12-03T08:06:16,995 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/911db94732f6:0:becomeActiveMaster-HFileCleaner.small.0-1733213176994,5,FailOnTimeoutGroup] 2024-12-03T08:06:16,995 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:16,995 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T08:06:16,996 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,996 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:16,996 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:16,997 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,004 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:35988 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:39291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35988 dst: /127.0.0.1:39291 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T08:06:17,008 DEBUG [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-03T08:06:17,008 DEBUG [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-03T08:06:17,008 DEBUG [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-03T08:06:17,008 WARN [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-03T08:06:17,008 WARN [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-03T08:06:17,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-03T08:06:17,011 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
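While the master is still initializing, the region servers above hit ServerNotRunningYetException / "Master is not running yet" and log "reportForDuty failed; sleeping 100 ms and then retrying." The snippet below is a generic sketch of that fixed-delay retry pattern, not HBase's HRegionServer code; the Callable-based shape and attempt cap are illustrative, with only the 100 ms figure taken from the log.

```java
import java.util.concurrent.Callable;

// Generic fixed-delay retry, sketching the "reportForDuty failed; sleeping 100 ms
// and then retrying" behaviour seen in the log. Not HBase's HRegionServer code.
public final class RetrySketch {
  public static <T> T retryWithFixedDelay(Callable<T> call, long delayMs, int maxAttempts)
      throws Exception {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return call.call();
      } catch (Exception e) {
        last = e;
        System.out.println("attempt " + attempt + " failed; sleeping " + delayMs
            + " ms and then retrying: " + e.getMessage());
        Thread.sleep(delayMs);
      }
    }
    throw last;
  }

  public static void main(String[] args) throws Exception {
    // Example: fails twice, succeeds on the third attempt.
    final int[] calls = {0};
    String result = retryWithFixedDelay(() -> {
      if (++calls[0] < 3) throw new IllegalStateException("Master is not running yet");
      return "registered";
    }, 100L, 10);
    System.out.println(result);
  }
}
```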
2024-12-03T08:06:17,012 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T08:06:17,013 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc 2024-12-03T08:06:17,008 WARN [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-03T08:06:17,019 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:17,019 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:17,026 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:56930 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:39715:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56930 dst: /127.0.0.1:39715 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T08:06:17,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-03T08:06:17,032 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T08:06:17,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:17,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T08:06:17,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T08:06:17,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:17,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:17,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T08:06:17,041 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T08:06:17,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:17,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:17,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T08:06:17,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T08:06:17,045 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:17,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:17,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T08:06:17,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T08:06:17,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:17,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:17,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T08:06:17,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740 2024-12-03T08:06:17,051 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740 2024-12-03T08:06:17,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T08:06:17,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T08:06:17,054 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T08:06:17,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T08:06:17,062 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T08:06:17,063 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61145999, jitterRate=-0.0888536125421524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T08:06:17,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733213177034Initializing all the Stores at 1733213177035 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213177035Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213177036 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213177036Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213177036Cleaning up temporary data from old regions at 1733213177054 (+18 ms)Region opened successfully at 1733213177066 (+12 ms) 2024-12-03T08:06:17,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T08:06:17,067 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T08:06:17,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T08:06:17,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T08:06:17,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T08:06:17,069 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T08:06:17,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733213177066Disabling compacts and flushes for region at 1733213177066Disabling writes for close at 1733213177067 (+1 ms)Writing region close event to WAL at 1733213177068 (+1 ms)Closed at 1733213177068 2024-12-03T08:06:17,073 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T08:06:17,073 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T08:06:17,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T08:06:17,093 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T08:06:17,097 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T08:06:17,109 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(2659): reportForDuty to master=911db94732f6,46089,1733213175227 with port=38655, startcode=1733213175905 2024-12-03T08:06:17,109 INFO [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(2659): reportForDuty to master=911db94732f6,46089,1733213175227 with port=39531, startcode=1733213176011 2024-12-03T08:06:17,111 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46089 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 
911db94732f6,39531,1733213176011 2024-12-03T08:06:17,113 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46089 {}] master.ServerManager(517): Registering regionserver=911db94732f6,39531,1733213176011 2024-12-03T08:06:17,114 INFO [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(2659): reportForDuty to master=911db94732f6,46089,1733213175227 with port=34321, startcode=1733213176054 2024-12-03T08:06:17,121 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46089 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 911db94732f6,38655,1733213175905 2024-12-03T08:06:17,121 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46089 {}] master.ServerManager(517): Registering regionserver=911db94732f6,38655,1733213175905 2024-12-03T08:06:17,121 DEBUG [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc 2024-12-03T08:06:17,121 DEBUG [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35153 2024-12-03T08:06:17,121 DEBUG [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T08:06:17,124 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46089 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 911db94732f6,34321,1733213176054 2024-12-03T08:06:17,124 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46089 {}] master.ServerManager(517): Registering regionserver=911db94732f6,34321,1733213176054 2024-12-03T08:06:17,124 DEBUG [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc 2024-12-03T08:06:17,124 DEBUG [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35153 2024-12-03T08:06:17,124 DEBUG [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T08:06:17,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T08:06:17,127 DEBUG [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc 2024-12-03T08:06:17,127 DEBUG [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35153 2024-12-03T08:06:17,127 DEBUG [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T08:06:17,130 DEBUG [RS:1;911db94732f6:39531 {}] zookeeper.ZKUtil(111): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/911db94732f6,39531,1733213176011 2024-12-03T08:06:17,130 WARN [RS:1;911db94732f6:39531 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
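The hbase:meta descriptor logged earlier lists per-family attributes (VERSIONS, BLOOMFILTER => 'ROWCOL', IN_MEMORY, DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE), and each StoreOpener then reports a matching CompactionConfiguration. The sketch below declares a family with the same attributes through the public client API; the table and family names are examples, not the meta table itself.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a user table whose 'info' family mirrors the attributes the log prints
// for hbase:meta (3 versions, ROWCOL bloom filter, in-memory, ROW_INDEX_V1
// encoding, 8 KB blocks). Table/family names here are examples only.
public class DescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();
  }
}
```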
2024-12-03T08:06:17,130 INFO [RS:1;911db94732f6:39531 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T08:06:17,130 DEBUG [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,39531,1733213176011 2024-12-03T08:06:17,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T08:06:17,131 DEBUG [RS:0;911db94732f6:38655 {}] zookeeper.ZKUtil(111): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/911db94732f6,38655,1733213175905 2024-12-03T08:06:17,131 WARN [RS:0;911db94732f6:38655 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T08:06:17,131 INFO [RS:0;911db94732f6:38655 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T08:06:17,131 DEBUG [RS:2;911db94732f6:34321 {}] zookeeper.ZKUtil(111): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/911db94732f6,34321,1733213176054 2024-12-03T08:06:17,131 DEBUG [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,38655,1733213175905 2024-12-03T08:06:17,131 WARN [RS:2;911db94732f6:34321 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
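wal.WALFactory above reports "Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider". The provider is normally chosen by configuration; the sketch below assumes the conventional hbase.wal.provider key and the asyncfs/filesystem value names, which are not shown in this log and should be confirmed for the release in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of selecting the WAL provider that WALFactory instantiates above.
// The "hbase.wal.provider" key and the "asyncfs"/"filesystem" values are the
// conventional names and are assumed here, not taken from this log.
public class WalProviderSketch {
  public static Configuration asyncWalConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");       // AsyncFSWALProvider, as in the log
    // conf.set("hbase.wal.provider", "filesystem"); // classic FSHLog-based provider
    return conf;
  }
}
```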
2024-12-03T08:06:17,131 INFO [RS:2;911db94732f6:34321 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T08:06:17,132 DEBUG [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,34321,1733213176054 2024-12-03T08:06:17,132 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [911db94732f6,38655,1733213175905] 2024-12-03T08:06:17,132 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [911db94732f6,39531,1733213176011] 2024-12-03T08:06:17,133 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [911db94732f6,34321,1733213176054] 2024-12-03T08:06:17,157 INFO [RS:1;911db94732f6:39531 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T08:06:17,157 INFO [RS:2;911db94732f6:34321 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T08:06:17,157 INFO [RS:0;911db94732f6:38655 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T08:06:17,171 INFO [RS:1;911db94732f6:39531 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T08:06:17,171 INFO [RS:2;911db94732f6:34321 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T08:06:17,172 INFO [RS:0;911db94732f6:38655 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T08:06:17,176 INFO [RS:1;911db94732f6:39531 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T08:06:17,176 INFO [RS:2;911db94732f6:34321 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T08:06:17,176 INFO [RS:0;911db94732f6:38655 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T08:06:17,176 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,176 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,176 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
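MemStoreFlusher above derives globalMemStoreLimit=880 M / LowMark=836 M from heap fractions, and PressureAwareCompactionThroughputController reports 100 MB/s and 50 MB/s bounds. A sketch of the configuration behind those numbers follows; all key names are the conventional ones and are assumed rather than read from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the settings behind the MemStoreFlusher and compaction-throughput
// numbers above. All key names are assumptions; check hbase-default.xml
// before relying on them.
public class MemstoreAndThroughputSketch {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of heap for all memstores, and the low-water mark as a fraction
    // of that limit (the log's 880 M / 836 M is roughly a 0.95 lower limit).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Pressure-aware compaction throughput bounds, in bytes per second
    // (100 MB/s and 50 MB/s match the bounds logged above).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}
```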
2024-12-03T08:06:17,177 INFO [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T08:06:17,180 INFO [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T08:06:17,181 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T08:06:17,183 INFO [RS:0;911db94732f6:38655 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T08:06:17,183 INFO [RS:1;911db94732f6:39531 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T08:06:17,183 INFO [RS:2;911db94732f6:34321 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T08:06:17,184 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,184 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,184 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,184 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG 
[RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,185 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/911db94732f6:0, corePoolSize=2, maxPoolSize=2 2024-12-03T08:06:17,186 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/911db94732f6:0, corePoolSize=2, maxPoolSize=2 2024-12-03T08:06:17,186 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/911db94732f6:0, corePoolSize=2, maxPoolSize=2 2024-12-03T08:06:17,186 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,186 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,187 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,187 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,187 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:17,187 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:17,187 DEBUG [RS:0;911db94732f6:38655 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:17,187 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:17,187 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:17,187 DEBUG [RS:2;911db94732f6:34321 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:17,187 DEBUG [RS:1;911db94732f6:39531 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:17,189 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,189 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,189 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,190 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
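The ChoreService entries above enable periodic chores such as CompactionChecker (period=1000 ms) and MemstoreFlusherChore. Below is a minimal sketch of that ScheduledChore / ChoreService pattern, assuming the hbase-common ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore signatures; the chore body and names are invented.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Minimal sketch of the pattern behind the "Chore ScheduledChore name=...,
// period=..., unit=MILLISECONDS is enabled." entries. Constructor and method
// signatures are assumed from hbase-common; the chore body is invented.
public class ChoreSketch {
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new SimpleStopper();
    ChoreService service = new ChoreService("example");
    service.scheduleChore(new ScheduledChore("ExampleChecker", stopper, 1000) {
      @Override protected void chore() {
        // Placeholder for periodic work (the real CompactionChecker scans stores).
        System.out.println("ExampleChecker tick");
      }
    });
    Thread.sleep(5_000);
    stopper.stop("done");
    service.shutdown();
  }
}
```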
2024-12-03T08:06:17,190 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,190 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,190 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,190 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,190 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,190 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,38655,1733213175905-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T08:06:17,190 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,190 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,34321,1733213176054-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T08:06:17,192 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,192 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,192 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,192 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,192 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,193 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,39531,1733213176011-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T08:06:17,219 INFO [RS:1;911db94732f6:39531 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T08:06:17,219 INFO [RS:0;911db94732f6:38655 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T08:06:17,219 INFO [RS:2;911db94732f6:34321 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T08:06:17,222 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,34321,1733213176054-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,221 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,39531,1733213176011-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,221 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,38655,1733213175905-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T08:06:17,222 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,222 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,222 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,222 INFO [RS:0;911db94732f6:38655 {}] regionserver.Replication(171): 911db94732f6,38655,1733213175905 started 2024-12-03T08:06:17,222 INFO [RS:2;911db94732f6:34321 {}] regionserver.Replication(171): 911db94732f6,34321,1733213176054 started 2024-12-03T08:06:17,222 INFO [RS:1;911db94732f6:39531 {}] regionserver.Replication(171): 911db94732f6,39531,1733213176011 started 2024-12-03T08:06:17,247 WARN [911db94732f6:46089 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T08:06:17,249 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,249 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,249 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,250 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(1482): Serving as 911db94732f6,38655,1733213175905, RpcServer on 911db94732f6/172.17.0.2:38655, sessionid=0x10152212f4e0001 2024-12-03T08:06:17,250 INFO [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(1482): Serving as 911db94732f6,39531,1733213176011, RpcServer on 911db94732f6/172.17.0.2:39531, sessionid=0x10152212f4e0002 2024-12-03T08:06:17,250 INFO [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(1482): Serving as 911db94732f6,34321,1733213176054, RpcServer on 911db94732f6/172.17.0.2:34321, sessionid=0x10152212f4e0003 2024-12-03T08:06:17,251 DEBUG [RS:2;911db94732f6:34321 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T08:06:17,251 DEBUG [RS:1;911db94732f6:39531 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T08:06:17,251 DEBUG [RS:0;911db94732f6:38655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T08:06:17,251 DEBUG [RS:1;911db94732f6:39531 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 911db94732f6,39531,1733213176011 2024-12-03T08:06:17,251 DEBUG [RS:0;911db94732f6:38655 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 911db94732f6,38655,1733213175905 2024-12-03T08:06:17,251 DEBUG [RS:2;911db94732f6:34321 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 911db94732f6,34321,1733213176054 2024-12-03T08:06:17,252 DEBUG [RS:0;911db94732f6:38655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,38655,1733213175905' 2024-12-03T08:06:17,252 DEBUG [RS:1;911db94732f6:39531 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 
'911db94732f6,39531,1733213176011' 2024-12-03T08:06:17,252 DEBUG [RS:2;911db94732f6:34321 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,34321,1733213176054' 2024-12-03T08:06:17,252 DEBUG [RS:0;911db94732f6:38655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T08:06:17,252 DEBUG [RS:1;911db94732f6:39531 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T08:06:17,252 DEBUG [RS:2;911db94732f6:34321 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T08:06:17,253 DEBUG [RS:1;911db94732f6:39531 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T08:06:17,253 DEBUG [RS:2;911db94732f6:34321 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T08:06:17,253 DEBUG [RS:0;911db94732f6:38655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T08:06:17,254 DEBUG [RS:0;911db94732f6:38655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T08:06:17,254 DEBUG [RS:1;911db94732f6:39531 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T08:06:17,254 DEBUG [RS:0;911db94732f6:38655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T08:06:17,254 DEBUG [RS:1;911db94732f6:39531 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T08:06:17,254 DEBUG [RS:2;911db94732f6:34321 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T08:06:17,254 DEBUG [RS:2;911db94732f6:34321 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T08:06:17,254 DEBUG [RS:0;911db94732f6:38655 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 911db94732f6,38655,1733213175905 2024-12-03T08:06:17,254 DEBUG [RS:1;911db94732f6:39531 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 911db94732f6,39531,1733213176011 2024-12-03T08:06:17,254 DEBUG [RS:2;911db94732f6:34321 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 911db94732f6,34321,1733213176054 2024-12-03T08:06:17,254 DEBUG [RS:1;911db94732f6:39531 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,39531,1733213176011' 2024-12-03T08:06:17,254 DEBUG [RS:0;911db94732f6:38655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,38655,1733213175905' 2024-12-03T08:06:17,254 DEBUG [RS:2;911db94732f6:34321 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,34321,1733213176054' 2024-12-03T08:06:17,254 DEBUG [RS:0;911db94732f6:38655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T08:06:17,254 DEBUG [RS:1;911db94732f6:39531 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T08:06:17,254 DEBUG [RS:2;911db94732f6:34321 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T08:06:17,255 DEBUG 
[RS:1;911db94732f6:39531 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T08:06:17,255 DEBUG [RS:0;911db94732f6:38655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T08:06:17,255 DEBUG [RS:2;911db94732f6:34321 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T08:06:17,256 DEBUG [RS:1;911db94732f6:39531 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T08:06:17,256 DEBUG [RS:0;911db94732f6:38655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T08:06:17,256 INFO [RS:1;911db94732f6:39531 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T08:06:17,256 DEBUG [RS:2;911db94732f6:34321 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T08:06:17,256 INFO [RS:0;911db94732f6:38655 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T08:06:17,256 INFO [RS:1;911db94732f6:39531 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T08:06:17,256 INFO [RS:2;911db94732f6:34321 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T08:06:17,256 INFO [RS:2;911db94732f6:34321 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T08:06:17,256 INFO [RS:0;911db94732f6:38655 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T08:06:17,361 INFO [RS:2;911db94732f6:34321 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T08:06:17,362 INFO [RS:1;911db94732f6:39531 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T08:06:17,362 INFO [RS:0;911db94732f6:38655 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T08:06:17,365 INFO [RS:1;911db94732f6:39531 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C39531%2C1733213176011, suffix=, logDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,39531,1733213176011, archiveDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/oldWALs, maxLogs=32 2024-12-03T08:06:17,365 INFO [RS:2;911db94732f6:34321 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C34321%2C1733213176054, suffix=, logDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,34321,1733213176054, archiveDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/oldWALs, maxLogs=32 2024-12-03T08:06:17,365 INFO [RS:0;911db94732f6:38655 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C38655%2C1733213175905, suffix=, logDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,38655,1733213175905, archiveDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/oldWALs, maxLogs=32 2024-12-03T08:06:17,385 DEBUG [RS:1;911db94732f6:39531 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When 
create output stream for /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,39531,1733213176011/911db94732f6%2C39531%2C1733213176011.1733213177370, exclude list is [], retry=0 2024-12-03T08:06:17,385 DEBUG [RS:2;911db94732f6:34321 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,34321,1733213176054/911db94732f6%2C34321%2C1733213176054.1733213177370, exclude list is [], retry=0 2024-12-03T08:06:17,385 DEBUG [RS:0;911db94732f6:38655 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,38655,1733213175905/911db94732f6%2C38655%2C1733213175905.1733213177370, exclude list is [], retry=0 2024-12-03T08:06:17,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43251,DS-6fd1f05f-2887-4568-aa99-464de1dfeefa,DISK] 2024-12-03T08:06:17,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39715,DS-936e5da1-52d2-4398-ae0a-1ec94da604f3,DISK] 2024-12-03T08:06:17,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39715,DS-936e5da1-52d2-4398-ae0a-1ec94da604f3,DISK] 2024-12-03T08:06:17,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43251,DS-6fd1f05f-2887-4568-aa99-464de1dfeefa,DISK] 2024-12-03T08:06:17,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39291,DS-7448b040-3754-49ef-b37f-8ea71d12640e,DISK] 2024-12-03T08:06:17,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39715,DS-936e5da1-52d2-4398-ae0a-1ec94da604f3,DISK] 2024-12-03T08:06:17,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39291,DS-7448b040-3754-49ef-b37f-8ea71d12640e,DISK] 2024-12-03T08:06:17,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39291,DS-7448b040-3754-49ef-b37f-8ea71d12640e,DISK] 2024-12-03T08:06:17,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43251,DS-6fd1f05f-2887-4568-aa99-464de1dfeefa,DISK] 2024-12-03T08:06:17,422 INFO [RS:1;911db94732f6:39531 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,39531,1733213176011/911db94732f6%2C39531%2C1733213176011.1733213177370 2024-12-03T08:06:17,423 INFO [RS:2;911db94732f6:34321 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,34321,1733213176054/911db94732f6%2C34321%2C1733213176054.1733213177370 2024-12-03T08:06:17,423 INFO [RS:0;911db94732f6:38655 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,38655,1733213175905/911db94732f6%2C38655%2C1733213175905.1733213177370 2024-12-03T08:06:17,429 DEBUG [RS:1;911db94732f6:39531 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35865:35865),(127.0.0.1/127.0.0.1:44807:44807),(127.0.0.1/127.0.0.1:39841:39841)] 2024-12-03T08:06:17,429 DEBUG [RS:2;911db94732f6:34321 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44807:44807),(127.0.0.1/127.0.0.1:39841:39841),(127.0.0.1/127.0.0.1:35865:35865)] 2024-12-03T08:06:17,430 DEBUG [RS:0;911db94732f6:38655 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44807:44807),(127.0.0.1/127.0.0.1:39841:39841),(127.0.0.1/127.0.0.1:35865:35865)] 2024-12-03T08:06:17,500 DEBUG [911db94732f6:46089 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-03T08:06:17,507 DEBUG [911db94732f6:46089 {}] balancer.BalancerClusterState(204): Hosts are {911db94732f6=0} racks are {/default-rack=0} 2024-12-03T08:06:17,514 DEBUG [911db94732f6:46089 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T08:06:17,514 DEBUG [911db94732f6:46089 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T08:06:17,514 DEBUG [911db94732f6:46089 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T08:06:17,514 DEBUG [911db94732f6:46089 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T08:06:17,514 DEBUG [911db94732f6:46089 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T08:06:17,514 DEBUG [911db94732f6:46089 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T08:06:17,514 INFO [911db94732f6:46089 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T08:06:17,514 INFO [911db94732f6:46089 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T08:06:17,514 INFO [911db94732f6:46089 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T08:06:17,514 DEBUG [911db94732f6:46089 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T08:06:17,521 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=911db94732f6,38655,1733213175905 2024-12-03T08:06:17,528 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 911db94732f6,38655,1733213175905, state=OPENING 2024-12-03T08:06:17,532 DEBUG [PEWorker-3 {}] 
zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T08:06:17,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:17,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:17,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:17,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:17,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:17,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:17,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:17,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:17,537 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T08:06:17,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=911db94732f6,38655,1733213175905}] 2024-12-03T08:06:17,713 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T08:06:17,716 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56079, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T08:06:17,728 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T08:06:17,728 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T08:06:17,729 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-03T08:06:17,732 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C38655%2C1733213175905.meta, suffix=.meta, 
logDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,38655,1733213175905, archiveDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/oldWALs, maxLogs=32 2024-12-03T08:06:17,748 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,38655,1733213175905/911db94732f6%2C38655%2C1733213175905.meta.1733213177734.meta, exclude list is [], retry=0 2024-12-03T08:06:17,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43251,DS-6fd1f05f-2887-4568-aa99-464de1dfeefa,DISK] 2024-12-03T08:06:17,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39715,DS-936e5da1-52d2-4398-ae0a-1ec94da604f3,DISK] 2024-12-03T08:06:17,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39291,DS-7448b040-3754-49ef-b37f-8ea71d12640e,DISK] 2024-12-03T08:06:17,756 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/WALs/911db94732f6,38655,1733213175905/911db94732f6%2C38655%2C1733213175905.meta.1733213177734.meta 2024-12-03T08:06:17,757 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44807:44807),(127.0.0.1/127.0.0.1:39841:39841),(127.0.0.1/127.0.0.1:35865:35865)] 2024-12-03T08:06:17,757 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T08:06:17,759 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T08:06:17,761 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T08:06:17,766 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
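The wal.AbstractFSWAL(613) entries above report the effective WAL settings for this run: blocksize=256 MB, rollsize=128 MB, maxLogs=32. A minimal sketch of how such values are usually driven from configuration, assuming the standard HBase property names (the key names below are not printed in this log and are an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalTuningSketch {
  public static void main(String[] args) {
    // Loads hbase-default.xml / hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // WAL block size on the underlying filesystem; the run above shows 256 MB.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // The WAL is rolled at blocksize * multiplier; 256 MB * 0.5 matches the 128 MB rollsize above.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Cap on un-archived WAL files per region server, matching maxLogs=32 above.
    conf.setInt("hbase.regionserver.maxlogs", 32);
    System.out.println("WAL blocksize = " + conf.getLong("hbase.regionserver.hlog.blocksize", -1));
  }
}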
2024-12-03T08:06:17,770 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T08:06:17,771 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:17,771 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T08:06:17,771 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T08:06:17,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T08:06:17,775 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T08:06:17,775 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:17,776 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:17,776 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T08:06:17,778 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T08:06:17,778 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:17,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:17,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T08:06:17,780 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T08:06:17,780 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:17,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:17,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T08:06:17,782 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T08:06:17,782 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:17,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T08:06:17,783 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T08:06:17,784 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740 2024-12-03T08:06:17,787 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740 2024-12-03T08:06:17,789 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T08:06:17,789 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T08:06:17,790 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T08:06:17,793 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T08:06:17,794 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67879626, jitterRate=0.011485248804092407}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T08:06:17,794 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T08:06:17,795 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733213177772Writing region info on filesystem at 1733213177772Initializing all the Stores at 1733213177773 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213177773Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213177774 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213177774Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213177774Cleaning up temporary data from old regions at 1733213177789 (+15 ms)Running coprocessor post-open hooks at 1733213177794 (+5 ms)Region opened successfully at 1733213177795 (+1 ms) 2024-12-03T08:06:17,802 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733213177704 2024-12-03T08:06:17,814 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T08:06:17,814 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T08:06:17,815 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=911db94732f6,38655,1733213175905 2024-12-03T08:06:17,817 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 911db94732f6,38655,1733213175905, state=OPEN 2024-12-03T08:06:17,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T08:06:17,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T08:06:17,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T08:06:17,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T08:06:17,820 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:17,820 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:17,820 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:17,820 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:17,820 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=911db94732f6,38655,1733213175905 2024-12-03T08:06:17,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T08:06:17,826 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=911db94732f6,38655,1733213175905 in 283 msec 2024-12-03T08:06:17,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T08:06:17,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 745 msec 2024-12-03T08:06:17,833 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T08:06:17,833 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T08:06:17,856 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T08:06:17,858 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=911db94732f6,38655,1733213175905, seqNum=-1] 2024-12-03T08:06:17,884 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T08:06:17,886 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55145, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T08:06:17,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0340 sec 2024-12-03T08:06:17,906 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733213177905, completionTime=-1 2024-12-03T08:06:17,908 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-03T08:06:17,908 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-03T08:06:17,935 INFO [master/911db94732f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-03T08:06:17,935 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733213237935 2024-12-03T08:06:17,935 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733213297935 2024-12-03T08:06:17,936 INFO [master/911db94732f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-03T08:06:17,937 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-03T08:06:17,946 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,46089,1733213175227-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,946 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,46089,1733213175227-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,946 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,46089,1733213175227-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,948 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-911db94732f6:46089, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,949 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,949 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:17,955 DEBUG [master/911db94732f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T08:06:18,004 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.877sec 2024-12-03T08:06:18,006 INFO [master/911db94732f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T08:06:18,007 INFO [master/911db94732f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T08:06:18,008 INFO [master/911db94732f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T08:06:18,009 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-03T08:06:18,009 INFO [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T08:06:18,009 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,46089,1733213175227-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T08:06:18,010 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,46089,1733213175227-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T08:06:18,014 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T08:06:18,015 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T08:06:18,016 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,46089,1733213175227-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:18,104 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f32acaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T08:06:18,108 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-03T08:06:18,108 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-03T08:06:18,112 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 911db94732f6,46089,-1 for getting cluster id 2024-12-03T08:06:18,115 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T08:06:18,123 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a0fb6aaf-dc81-445c-98c4-aa387343d58e' 2024-12-03T08:06:18,125 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T08:06:18,126 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a0fb6aaf-dc81-445c-98c4-aa387343d58e" 2024-12-03T08:06:18,126 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cd211e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T08:06:18,126 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [911db94732f6,46089,-1] 2024-12-03T08:06:18,128 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T08:06:18,130 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:18,131 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-03T08:06:18,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9fe7c2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T08:06:18,134 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T08:06:18,141 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=911db94732f6,38655,1733213175905, seqNum=-1] 2024-12-03T08:06:18,141 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T08:06:18,143 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T08:06:18,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=911db94732f6,46089,1733213175227 2024-12-03T08:06:18,176 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T08:06:18,180 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 911db94732f6,46089,1733213175227 2024-12-03T08:06:18,183 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6bc7313f 2024-12-03T08:06:18,183 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T08:06:18,186 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60964, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T08:06:18,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T08:06:18,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-03T08:06:18,202 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T08:06:18,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-03T08:06:18,204 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T08:06:18,206 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-03T08:06:18,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-03T08:06:18,215 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:18,215 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:18,219 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:56980 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:39715:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56980 dst: /127.0.0.1:39715
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T08:06:18,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_-9223372036854775680_1021 (size=392)
2024-12-03T08:06:18,225 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-03T08:06:18,229 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 961dd20c1bf5aa712d29b056e4d9ac34, NAME => 'TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc 2024-12-03T08:06:18,235 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:18,235 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:18,239 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:36050 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:39291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36050 dst: /127.0.0.1:39291 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T08:06:18,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-03T08:06:18,244 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
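The repeated "Cannot allocate parity block (index=3/4, policy=RS-3-2-1024k)" warnings are expected in this setup: RS-3-2-1024k stripes each block group into 3 data plus 2 parity blocks, so it wants at least 5 datanodes, while the mini-DFS cluster here runs only 3. Every striped write therefore drops both parity blocks, which is also why each block group is reported "at high risk of losing data". The log itself points at 'hdfs ec -verifyClusterSetup'; the sketch below does the same kind of check from the HDFS Java client, assuming an hdfs:// default filesystem and using an illustrative path (substitute the directory of interest).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class CheckEcPolicy {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();            // assumes fs.defaultFS points at the cluster
        Path dir = new Path("/user/jenkins/test-data");      // illustrative path, not taken from a config
        try (DistributedFileSystem dfs = (DistributedFileSystem) dir.getFileSystem(conf)) {
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println(dir + " is replicated, not erasure coded");
            return;
          }
          int needed = policy.getNumDataUnits() + policy.getNumParityUnits();  // 3 + 2 = 5 for RS-3-2-1024k
          int live = dfs.getDataNodeStats().length;                            // live datanodes in the cluster
          System.out.printf("policy=%s, requires %d datanodes, cluster has %d%n",
              policy.getName(), needed, live);
          // Fewer live datanodes than data+parity units reproduces the
          // "Cannot allocate parity block" warnings seen in this log.
        }
      }
    }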
2024-12-03T08:06:18,245 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:18,245 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 961dd20c1bf5aa712d29b056e4d9ac34, disabling compactions & flushes 2024-12-03T08:06:18,245 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:18,245 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:18,245 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. after waiting 0 ms 2024-12-03T08:06:18,246 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:18,246 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:18,246 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 961dd20c1bf5aa712d29b056e4d9ac34: Waiting for close lock at 1733213178245Disabling compacts and flushes for region at 1733213178245Disabling writes for close at 1733213178246 (+1 ms)Writing region close event to WAL at 1733213178246Closed at 1733213178246 2024-12-03T08:06:18,248 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T08:06:18,254 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733213178248"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733213178248"}]},"ts":"1733213178248"} 2024-12-03T08:06:18,260 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T08:06:18,262 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T08:06:18,265 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733213178262"}]},"ts":"1733213178262"} 2024-12-03T08:06:18,270 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-03T08:06:18,270 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {911db94732f6=0} racks are {/default-rack=0} 2024-12-03T08:06:18,272 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T08:06:18,272 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T08:06:18,272 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T08:06:18,272 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T08:06:18,272 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T08:06:18,272 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T08:06:18,272 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T08:06:18,272 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T08:06:18,272 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T08:06:18,272 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T08:06:18,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=961dd20c1bf5aa712d29b056e4d9ac34, ASSIGN}] 2024-12-03T08:06:18,276 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=961dd20c1bf5aa712d29b056e4d9ac34, ASSIGN 2024-12-03T08:06:18,278 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=961dd20c1bf5aa712d29b056e4d9ac34, ASSIGN; state=OFFLINE, location=911db94732f6,38655,1733213175905; forceNewPlan=false, retain=false 2024-12-03T08:06:18,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T08:06:18,431 INFO [911db94732f6:46089 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-03T08:06:18,431 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=961dd20c1bf5aa712d29b056e4d9ac34, regionState=OPENING, regionLocation=911db94732f6,38655,1733213175905 2024-12-03T08:06:18,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=961dd20c1bf5aa712d29b056e4d9ac34, ASSIGN because future has completed 2024-12-03T08:06:18,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 961dd20c1bf5aa712d29b056e4d9ac34, server=911db94732f6,38655,1733213175905}] 2024-12-03T08:06:18,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T08:06:18,596 INFO [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:18,596 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 961dd20c1bf5aa712d29b056e4d9ac34, NAME => 'TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34.', STARTKEY => '', ENDKEY => ''} 2024-12-03T08:06:18,596 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,596 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:18,597 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,597 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,599 INFO [StoreOpener-961dd20c1bf5aa712d29b056e4d9ac34-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,601 INFO [StoreOpener-961dd20c1bf5aa712d29b056e4d9ac34-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 961dd20c1bf5aa712d29b056e4d9ac34 columnFamilyName cf 2024-12-03T08:06:18,601 DEBUG [StoreOpener-961dd20c1bf5aa712d29b056e4d9ac34-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:18,602 INFO [StoreOpener-961dd20c1bf5aa712d29b056e4d9ac34-1 {}] regionserver.HStore(327): Store=961dd20c1bf5aa712d29b056e4d9ac34/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T08:06:18,602 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,603 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,604 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,604 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,604 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,607 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,612 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/961dd20c1bf5aa712d29b056e4d9ac34/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T08:06:18,612 INFO [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 961dd20c1bf5aa712d29b056e4d9ac34; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67851269, jitterRate=0.01106269657611847}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T08:06:18,612 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:18,613 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 961dd20c1bf5aa712d29b056e4d9ac34: Running coprocessor pre-open hook at 1733213178597Writing region info on filesystem at 1733213178597Initializing all the Stores at 1733213178598 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213178598Cleaning up temporary data from old regions at 1733213178604 (+6 ms)Running coprocessor post-open hooks at 1733213178613 (+9 ms)Region opened successfully at 1733213178613 2024-12-03T08:06:18,615 INFO [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34., pid=6, masterSystemTime=1733213178589 2024-12-03T08:06:18,618 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:18,618 INFO [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:18,620 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=961dd20c1bf5aa712d29b056e4d9ac34, regionState=OPEN, openSeqNum=2, regionLocation=911db94732f6,38655,1733213175905 2024-12-03T08:06:18,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 961dd20c1bf5aa712d29b056e4d9ac34, server=911db94732f6,38655,1733213175905 because future has completed 2024-12-03T08:06:18,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T08:06:18,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 961dd20c1bf5aa712d29b056e4d9ac34, server=911db94732f6,38655,1733213175905 in 189 msec 2024-12-03T08:06:18,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T08:06:18,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=961dd20c1bf5aa712d29b056e4d9ac34, ASSIGN in 355 msec 2024-12-03T08:06:18,634 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T08:06:18,634 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733213178634"}]},"ts":"1733213178634"} 2024-12-03T08:06:18,636 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-03T08:06:18,638 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T08:06:18,640 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 444 msec 2024-12-03T08:06:18,838 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T08:06:18,839 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-03T08:06:18,839 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-03T08:06:18,840 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T08:06:18,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-03T08:06:18,845 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T08:06:18,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-03T08:06:18,853 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34., hostname=911db94732f6,38655,1733213175905, seqNum=2] 2024-12-03T08:06:18,863 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-03T08:06:18,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-03T08:06:18,870 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-03T08:06:18,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T08:06:18,872 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T08:06:18,873 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T08:06:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T08:06:19,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38655 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-03T08:06:19,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 
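Ahead of the flush request at 08:06:18,863, the client has located the region for row='row' and, judging by the HFile key printed during the flush ("row/cf:cq/.../Put") and the 32 B data size, written a single cell to it. A short sketch of that write-then-flush sequence through the HBase client API follows; the row, family and qualifier names come from the log, while the value and the surrounding structure are assumptions for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // One cell at row/cf:cq, matching the key printed for the flushed HFile.
          Put put = new Put(Bytes.toBytes("row"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")); // value is invented
          table.put(put);
          // Triggers the FlushTableProcedure (pid=7) and the per-region flush (pid=8) seen in the log.
          admin.flush(name);
        }
      }
    }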
2024-12-03T08:06:19,040 INFO [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 961dd20c1bf5aa712d29b056e4d9ac34 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-03T08:06:19,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/961dd20c1bf5aa712d29b056e4d9ac34/.tmp/cf/955204821ae141e2b2ceafa1e792457c is 36, key is row/cf:cq/1733213178856/Put/seqid=0 2024-12-03T08:06:19,100 WARN [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:19,100 WARN [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:19,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_918513459_22 at /127.0.0.1:56998 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:39715:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56998 dst: /127.0.0.1:39715 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T08:06:19,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-03T08:06:19,110 WARN [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-03T08:06:19,110 INFO [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/961dd20c1bf5aa712d29b056e4d9ac34/.tmp/cf/955204821ae141e2b2ceafa1e792457c 2024-12-03T08:06:19,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/961dd20c1bf5aa712d29b056e4d9ac34/.tmp/cf/955204821ae141e2b2ceafa1e792457c as hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/961dd20c1bf5aa712d29b056e4d9ac34/cf/955204821ae141e2b2ceafa1e792457c 2024-12-03T08:06:19,163 INFO [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/961dd20c1bf5aa712d29b056e4d9ac34/cf/955204821ae141e2b2ceafa1e792457c, entries=1, sequenceid=5, filesize=4.7 K 2024-12-03T08:06:19,169 INFO [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 961dd20c1bf5aa712d29b056e4d9ac34 in 130ms, sequenceid=5, compaction requested=false 2024-12-03T08:06:19,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-03T08:06:19,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 961dd20c1bf5aa712d29b056e4d9ac34: 2024-12-03T08:06:19,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 
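The "Added ... cf/955204821ae141e2b2ceafa1e792457c, entries=1, sequenceid=5, filesize=4.7 K" entry records the temporary flush output being committed into the column-family directory. If one wanted to confirm the flushed store file independently of HBase, a plain FileSystem listing such as the sketch below would show it; the directory is copied from the log (port 35153 is this run's ephemeral namenode port), and the expectation of a single file of roughly 4.7 K is specific to this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Column-family directory taken from the log entries above.
        Path cfDir = new Path("hdfs://localhost:35153/user/jenkins/test-data/"
            + "65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/"
            + "961dd20c1bf5aa712d29b056e4d9ac34/cf");
        FileSystem fs = cfDir.getFileSystem(conf);
        for (FileStatus status : fs.listStatus(cfDir)) {
          // Expect one HFile of ~4.7 K (size=4787 in the block report above).
          System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
        }
      }
    }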
2024-12-03T08:06:19,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-03T08:06:19,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-03T08:06:19,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-03T08:06:19,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 304 msec 2024-12-03T08:06:19,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 318 msec 2024-12-03T08:06:19,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46089 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T08:06:19,188 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-03T08:06:19,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T08:06:19,202 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T08:06:19,202 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:19,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:19,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:19,208 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T08:06:19,208 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T08:06:19,208 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=880738534, stopped=false 2024-12-03T08:06:19,208 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=911db94732f6,46089,1733213175227 2024-12-03T08:06:19,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:19,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:19,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:19,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:19,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:19,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, 
quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:19,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:19,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:19,211 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T08:06:19,211 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T08:06:19,211 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:19,211 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:19,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:19,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:19,212 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '911db94732f6,38655,1733213175905' ***** 2024-12-03T08:06:19,212 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T08:06:19,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:19,212 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '911db94732f6,39531,1733213176011' ***** 2024-12-03T08:06:19,212 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T08:06:19,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:19,212 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '911db94732f6,34321,1733213176054' ***** 2024-12-03T08:06:19,212 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T08:06:19,212 INFO [RS:1;911db94732f6:39531 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T08:06:19,212 INFO [RS:2;911db94732f6:34321 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T08:06:19,213 INFO [RS:0;911db94732f6:38655 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T08:06:19,213 INFO [RS:1;911db94732f6:39531 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T08:06:19,213 INFO [RS:0;911db94732f6:38655 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
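The shutdown call stacks above all bottom out in TestHBaseWalOnEC.tearDown (line 101) calling HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection, asks the master to shut down, and stops the three region servers whose "***** STOPPING region server ... *****" messages appear just above. A minimal JUnit lifecycle of that shape is sketched below; only the shutdownMiniCluster call is taken from the stack traces, while the field name, the @BeforeClass wiring and the startMiniCluster(3) overload are assumptions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycle {
      // Shared mini-cluster helper; the field name is illustrative.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        UTIL.startMiniCluster(3);   // three region servers, as in this log (RS:0, RS:1, RS:2)
      }

      @AfterClass
      public static void tearDown() throws Exception {
        // Corresponds to the HBaseTestingUtil.shutdownMiniCluster frame in the stack traces above:
        // stops the region servers, the master, ZooKeeper and the mini DFS cluster.
        UTIL.shutdownMiniCluster();
      }
    }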
2024-12-03T08:06:19,213 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T08:06:19,213 INFO [RS:1;911db94732f6:39531 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T08:06:19,213 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T08:06:19,213 INFO [RS:0;911db94732f6:38655 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T08:06:19,213 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T08:06:19,213 INFO [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(959): stopping server 911db94732f6,39531,1733213176011 2024-12-03T08:06:19,213 INFO [RS:1;911db94732f6:39531 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T08:06:19,213 INFO [RS:2;911db94732f6:34321 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T08:06:19,213 INFO [RS:1;911db94732f6:39531 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;911db94732f6:39531. 2024-12-03T08:06:19,213 INFO [RS:2;911db94732f6:34321 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T08:06:19,214 DEBUG [RS:1;911db94732f6:39531 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:19,214 INFO [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(959): stopping server 911db94732f6,34321,1733213176054 2024-12-03T08:06:19,214 DEBUG [RS:1;911db94732f6:39531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:19,214 INFO [RS:2;911db94732f6:34321 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T08:06:19,214 INFO [RS:2;911db94732f6:34321 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;911db94732f6:34321. 
2024-12-03T08:06:19,214 DEBUG [RS:2;911db94732f6:34321 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:19,214 INFO [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(976): stopping server 911db94732f6,39531,1733213176011; all regions closed. 2024-12-03T08:06:19,214 DEBUG [RS:2;911db94732f6:34321 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:19,214 INFO [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(976): stopping server 911db94732f6,34321,1733213176054; all regions closed. 2024-12-03T08:06:19,214 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(3091): Received CLOSE for 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:19,215 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(959): stopping server 911db94732f6,38655,1733213175905 2024-12-03T08:06:19,215 INFO [RS:0;911db94732f6:38655 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T08:06:19,215 INFO [RS:0;911db94732f6:38655 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;911db94732f6:38655. 
2024-12-03T08:06:19,215 DEBUG [RS:0;911db94732f6:38655 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:19,215 DEBUG [RS:0;911db94732f6:38655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:19,215 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 961dd20c1bf5aa712d29b056e4d9ac34, disabling compactions & flushes 2024-12-03T08:06:19,216 INFO [RS:0;911db94732f6:38655 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T08:06:19,216 INFO [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:19,216 INFO [RS:0;911db94732f6:38655 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T08:06:19,216 INFO [RS:0;911db94732f6:38655 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T08:06:19,216 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:19,216 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. after waiting 0 ms 2024-12-03T08:06:19,216 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T08:06:19,216 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 
2024-12-03T08:06:19,216 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T08:06:19,216 DEBUG [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 961dd20c1bf5aa712d29b056e4d9ac34=TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34.} 2024-12-03T08:06:19,217 DEBUG [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 961dd20c1bf5aa712d29b056e4d9ac34 2024-12-03T08:06:19,217 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T08:06:19,217 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T08:06:19,217 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T08:06:19,217 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T08:06:19,217 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T08:06:19,218 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-03T08:06:19,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_1073741826_1016 (size=93) 2024-12-03T08:06:19,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_1073741827_1017 (size=93) 2024-12-03T08:06:19,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43251 is added to blk_1073741826_1016 (size=93) 2024-12-03T08:06:19,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43251 is added to blk_1073741827_1017 (size=93) 2024-12-03T08:06:19,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_1073741826_1016 (size=93) 2024-12-03T08:06:19,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_1073741827_1017 (size=93) 2024-12-03T08:06:19,229 DEBUG [RS:2;911db94732f6:34321 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/oldWALs 2024-12-03T08:06:19,230 INFO [RS:2;911db94732f6:34321 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 911db94732f6%2C34321%2C1733213176054:(num 1733213177370) 2024-12-03T08:06:19,230 DEBUG [RS:2;911db94732f6:34321 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:19,230 INFO [RS:2;911db94732f6:34321 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T08:06:19,230 DEBUG [RS:1;911db94732f6:39531 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/oldWALs 2024-12-03T08:06:19,230 INFO [RS:1;911db94732f6:39531 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 
911db94732f6%2C39531%2C1733213176011:(num 1733213177370) 2024-12-03T08:06:19,230 DEBUG [RS:1;911db94732f6:39531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:19,230 INFO [RS:2;911db94732f6:34321 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T08:06:19,230 INFO [RS:1;911db94732f6:39531 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T08:06:19,230 INFO [RS:1;911db94732f6:39531 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T08:06:19,230 INFO [RS:2;911db94732f6:34321 {}] hbase.ChoreService(370): Chore service for: regionserver/911db94732f6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T08:06:19,230 INFO [RS:2;911db94732f6:34321 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T08:06:19,231 INFO [RS:2;911db94732f6:34321 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T08:06:19,230 INFO [regionserver/911db94732f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T08:06:19,230 INFO [RS:1;911db94732f6:39531 {}] hbase.ChoreService(370): Chore service for: regionserver/911db94732f6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T08:06:19,231 INFO [RS:2;911db94732f6:34321 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T08:06:19,231 INFO [RS:2;911db94732f6:34321 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T08:06:19,231 INFO [RS:1;911db94732f6:39531 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T08:06:19,231 INFO [regionserver/911db94732f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T08:06:19,231 INFO [RS:1;911db94732f6:39531 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T08:06:19,231 INFO [RS:1;911db94732f6:39531 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T08:06:19,231 INFO [RS:1;911db94732f6:39531 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T08:06:19,231 INFO [RS:2;911db94732f6:34321 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34321 2024-12-03T08:06:19,231 INFO [RS:1;911db94732f6:39531 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39531 2024-12-03T08:06:19,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/911db94732f6,39531,1733213176011 2024-12-03T08:06:19,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T08:06:19,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/911db94732f6,34321,1733213176054 2024-12-03T08:06:19,235 INFO [RS:2;911db94732f6:34321 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T08:06:19,235 INFO [RS:1;911db94732f6:39531 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T08:06:19,237 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/default/TestHBaseWalOnEC/961dd20c1bf5aa712d29b056e4d9ac34/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T08:06:19,238 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [911db94732f6,34321,1733213176054] 2024-12-03T08:06:19,239 INFO [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 2024-12-03T08:06:19,239 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/911db94732f6,34321,1733213176054 already deleted, retry=false 2024-12-03T08:06:19,239 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 961dd20c1bf5aa712d29b056e4d9ac34: Waiting for close lock at 1733213179215Running coprocessor pre-close hooks at 1733213179215Disabling compacts and flushes for region at 1733213179215Disabling writes for close at 1733213179216 (+1 ms)Writing region close event to WAL at 1733213179222 (+6 ms)Running coprocessor post-close hooks at 1733213179238 (+16 ms)Closed at 1733213179239 (+1 ms) 2024-12-03T08:06:19,240 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 911db94732f6,34321,1733213176054 expired; onlineServers=2 2024-12-03T08:06:19,240 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [911db94732f6,39531,1733213176011] 2024-12-03T08:06:19,240 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34. 
2024-12-03T08:06:19,241 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/911db94732f6,39531,1733213176011 already deleted, retry=false
2024-12-03T08:06:19,241 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 911db94732f6,39531,1733213176011 expired; onlineServers=1
2024-12-03T08:06:19,252 INFO [regionserver/911db94732f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-03T08:06:19,252 INFO [regionserver/911db94732f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-03T08:06:19,258 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/.tmp/info/a092aca4e6814d3eb63f2890826cf512 is 153, key is TestHBaseWalOnEC,,1733213178187.961dd20c1bf5aa712d29b056e4d9ac34./info:regioninfo/1733213178619/Put/seqid=0
2024-12-03T08:06:19,261 WARN [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,261 WARN [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_918513459_22 at /127.0.0.1:36068 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36068 dst: /127.0.0.1:39291
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T08:06:19,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_-9223372036854775632_1027 (size=6637)
2024-12-03T08:06:19,270 WARN [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-03T08:06:19,271 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/.tmp/info/a092aca4e6814d3eb63f2890826cf512
2024-12-03T08:06:19,293 INFO [regionserver/911db94732f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T08:06:19,293 INFO [regionserver/911db94732f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T08:06:19,294 INFO [regionserver/911db94732f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T08:06:19,297 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/.tmp/ns/6f3ae5577d484fc3b2e51b5d97576da6 is 43, key is default/ns:d/1733213177891/Put/seqid=0
2024-12-03T08:06:19,299 WARN [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,299 WARN [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,303 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_918513459_22 at /127.0.0.1:57004 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:39715:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57004 dst: /127.0.0.1:39715
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T08:06:19,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_-9223372036854775616_1029 (size=5153)
2024-12-03T08:06:19,307 WARN [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-03T08:06:19,307 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/.tmp/ns/6f3ae5577d484fc3b2e51b5d97576da6
2024-12-03T08:06:19,335 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/.tmp/table/00a0c4e3233e4e1c93bdf384ed2ce7df is 52, key is TestHBaseWalOnEC/table:state/1733213178634/Put/seqid=0
2024-12-03T08:06:19,337 WARN [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,337 WARN [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T08:06:19,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T08:06:19,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34321-0x10152212f4e0003, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T08:06:19,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39531-0x10152212f4e0002, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T08:06:19,338 INFO [RS:2;911db94732f6:34321 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-03T08:06:19,338 INFO [RS:1;911db94732f6:39531 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-03T08:06:19,338 INFO [RS:1;911db94732f6:39531 {}] regionserver.HRegionServer(1031): Exiting; stopping=911db94732f6,39531,1733213176011; zookeeper connection closed.
2024-12-03T08:06:19,338 INFO [RS:2;911db94732f6:34321 {}] regionserver.HRegionServer(1031): Exiting; stopping=911db94732f6,34321,1733213176054; zookeeper connection closed.
2024-12-03T08:06:19,339 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@72d89970 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@72d89970
2024-12-03T08:06:19,339 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2d9cbced {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2d9cbced
2024-12-03T08:06:19,340 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_918513459_22 at /127.0.0.1:40060 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:43251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40060 dst: /127.0.0.1:43251
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T08:06:19,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43251 is added to blk_-9223372036854775600_1031 (size=5249)
2024-12-03T08:06:19,347 WARN [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-03T08:06:19,347 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/.tmp/table/00a0c4e3233e4e1c93bdf384ed2ce7df
2024-12-03T08:06:19,356 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/.tmp/info/a092aca4e6814d3eb63f2890826cf512 as hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/info/a092aca4e6814d3eb63f2890826cf512
2024-12-03T08:06:19,365 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/info/a092aca4e6814d3eb63f2890826cf512, entries=10, sequenceid=11, filesize=6.5 K
2024-12-03T08:06:19,367 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/.tmp/ns/6f3ae5577d484fc3b2e51b5d97576da6 as hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/ns/6f3ae5577d484fc3b2e51b5d97576da6
2024-12-03T08:06:19,375 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/ns/6f3ae5577d484fc3b2e51b5d97576da6, entries=2, sequenceid=11, filesize=5.0 K
2024-12-03T08:06:19,376 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/.tmp/table/00a0c4e3233e4e1c93bdf384ed2ce7df as hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/table/00a0c4e3233e4e1c93bdf384ed2ce7df
2024-12-03T08:06:19,385 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/table/00a0c4e3233e4e1c93bdf384ed2ce7df, entries=2, sequenceid=11, filesize=5.1 K
2024-12-03T08:06:19,387 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 170ms, sequenceid=11, compaction requested=false
2024-12-03T08:06:19,387 DEBUG
[RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T08:06:19,396 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T08:06:19,397 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T08:06:19,397 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T08:06:19,397 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733213179217Running coprocessor pre-close hooks at 1733213179217Disabling compacts and flushes for region at 1733213179217Disabling writes for close at 1733213179217Obtaining lock to block concurrent updates at 1733213179218 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733213179218Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733213179219 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733213179222 (+3 ms)Flushing 1588230740/info: creating writer at 1733213179223 (+1 ms)Flushing 1588230740/info: appending metadata at 1733213179255 (+32 ms)Flushing 1588230740/info: closing flushed file at 1733213179255Flushing 1588230740/ns: creating writer at 1733213179281 (+26 ms)Flushing 1588230740/ns: appending metadata at 1733213179297 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733213179297Flushing 1588230740/table: creating writer at 1733213179316 (+19 ms)Flushing 1588230740/table: appending metadata at 1733213179334 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733213179334Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1006ead7: reopening flushed file at 1733213179355 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b220a20: reopening flushed file at 1733213179365 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fe06f81: reopening flushed file at 1733213179375 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 170ms, sequenceid=11, compaction requested=false at 1733213179387 (+12 ms)Writing region close event to WAL at 1733213179389 (+2 ms)Running coprocessor post-close hooks at 1733213179396 (+7 ms)Closed at 1733213179397 (+1 ms) 2024-12-03T08:06:19,397 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T08:06:19,417 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(976): stopping server 911db94732f6,38655,1733213175905; all regions closed. 
2024-12-03T08:06:19,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_1073741829_1019 (size=2751) 2024-12-03T08:06:19,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43251 is added to blk_1073741829_1019 (size=2751) 2024-12-03T08:06:19,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_1073741829_1019 (size=2751) 2024-12-03T08:06:19,423 DEBUG [RS:0;911db94732f6:38655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/oldWALs 2024-12-03T08:06:19,424 INFO [RS:0;911db94732f6:38655 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 911db94732f6%2C38655%2C1733213175905.meta:.meta(num 1733213177734) 2024-12-03T08:06:19,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_1073741828_1018 (size=1298) 2024-12-03T08:06:19,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43251 is added to blk_1073741828_1018 (size=1298) 2024-12-03T08:06:19,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_1073741828_1018 (size=1298) 2024-12-03T08:06:19,429 DEBUG [RS:0;911db94732f6:38655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/oldWALs 2024-12-03T08:06:19,429 INFO [RS:0;911db94732f6:38655 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 911db94732f6%2C38655%2C1733213175905:(num 1733213177370) 2024-12-03T08:06:19,429 DEBUG [RS:0;911db94732f6:38655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:19,429 INFO [RS:0;911db94732f6:38655 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T08:06:19,430 INFO [RS:0;911db94732f6:38655 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T08:06:19,430 INFO [RS:0;911db94732f6:38655 {}] hbase.ChoreService(370): Chore service for: regionserver/911db94732f6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T08:06:19,430 INFO [RS:0;911db94732f6:38655 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T08:06:19,430 INFO [regionserver/911db94732f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T08:06:19,430 INFO [RS:0;911db94732f6:38655 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38655 2024-12-03T08:06:19,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T08:06:19,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/911db94732f6,38655,1733213175905 2024-12-03T08:06:19,432 INFO [RS:0;911db94732f6:38655 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T08:06:19,433 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [911db94732f6,38655,1733213175905] 2024-12-03T08:06:19,435 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/911db94732f6,38655,1733213175905 already deleted, retry=false 2024-12-03T08:06:19,435 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 911db94732f6,38655,1733213175905 expired; onlineServers=0 2024-12-03T08:06:19,435 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '911db94732f6,46089,1733213175227' ***** 2024-12-03T08:06:19,435 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T08:06:19,435 INFO [M:0;911db94732f6:46089 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T08:06:19,435 INFO [M:0;911db94732f6:46089 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T08:06:19,435 DEBUG [M:0;911db94732f6:46089 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T08:06:19,436 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T08:06:19,436 DEBUG [M:0;911db94732f6:46089 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T08:06:19,436 DEBUG [master/911db94732f6:0:becomeActiveMaster-HFileCleaner.large.0-1733213176991 {}] cleaner.HFileCleaner(306): Exit Thread[master/911db94732f6:0:becomeActiveMaster-HFileCleaner.large.0-1733213176991,5,FailOnTimeoutGroup] 2024-12-03T08:06:19,436 DEBUG [master/911db94732f6:0:becomeActiveMaster-HFileCleaner.small.0-1733213176994 {}] cleaner.HFileCleaner(306): Exit Thread[master/911db94732f6:0:becomeActiveMaster-HFileCleaner.small.0-1733213176994,5,FailOnTimeoutGroup] 2024-12-03T08:06:19,436 INFO [M:0;911db94732f6:46089 {}] hbase.ChoreService(370): Chore service for: master/911db94732f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T08:06:19,436 INFO [M:0;911db94732f6:46089 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T08:06:19,436 DEBUG [M:0;911db94732f6:46089 {}] master.HMaster(1795): Stopping service threads 2024-12-03T08:06:19,436 INFO [M:0;911db94732f6:46089 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T08:06:19,436 INFO [M:0;911db94732f6:46089 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T08:06:19,437 INFO [M:0;911db94732f6:46089 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T08:06:19,437 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T08:06:19,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T08:06:19,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:19,438 DEBUG [M:0;911db94732f6:46089 {}] zookeeper.ZKUtil(347): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T08:06:19,438 WARN [M:0;911db94732f6:46089 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T08:06:19,439 INFO [M:0;911db94732f6:46089 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/.lastflushedseqids 2024-12-03T08:06:19,447 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T08:06:19,447 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-03T08:06:19,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:36084 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36084 dst: /127.0.0.1:39291
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T08:06:19,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_-9223372036854775584_1033 (size=137)
2024-12-03T08:06:19,454 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-03T08:06:19,454 INFO [M:0;911db94732f6:46089 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-03T08:06:19,454 INFO [M:0;911db94732f6:46089 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-03T08:06:19,454 DEBUG [M:0;911db94732f6:46089 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T08:06:19,454 INFO [M:0;911db94732f6:46089 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T08:06:19,455 DEBUG [M:0;911db94732f6:46089 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T08:06:19,455 DEBUG [M:0;911db94732f6:46089 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T08:06:19,455 DEBUG [M:0;911db94732f6:46089 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T08:06:19,455 INFO [M:0;911db94732f6:46089 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB
2024-12-03T08:06:19,474 DEBUG [M:0;911db94732f6:46089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aada4e65928e4ba7aec69ee63186a5bc is 82, key is hbase:meta,,1/info:regioninfo/1733213177815/Put/seqid=0
2024-12-03T08:06:19,475 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,476 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,478 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:40078 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:43251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40078 dst: /127.0.0.1:43251
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T08:06:19,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43251 is added to blk_-9223372036854775568_1035 (size=5672)
2024-12-03T08:06:19,482 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-03T08:06:19,482 INFO [M:0;911db94732f6:46089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aada4e65928e4ba7aec69ee63186a5bc
2024-12-03T08:06:19,507 DEBUG [M:0;911db94732f6:46089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/67b4a622ee2f45c895bdf13154862a05 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733213178639/Put/seqid=0
2024-12-03T08:06:19,509 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,509 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,511 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:57024 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:39715:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57024 dst: /127.0.0.1:39715
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T08:06:19,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_-9223372036854775552_1037 (size=6438)
2024-12-03T08:06:19,516 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-03T08:06:19,516 INFO [M:0;911db94732f6:46089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/67b4a622ee2f45c895bdf13154862a05
2024-12-03T08:06:19,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T08:06:19,534 INFO [RS:0;911db94732f6:38655 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-03T08:06:19,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38655-0x10152212f4e0001, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T08:06:19,534 INFO [RS:0;911db94732f6:38655 {}] regionserver.HRegionServer(1031): Exiting; stopping=911db94732f6,38655,1733213175905; zookeeper connection closed.
2024-12-03T08:06:19,535 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@67f6ab57 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@67f6ab57
2024-12-03T08:06:19,535 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-12-03T08:06:19,539 DEBUG [M:0;911db94732f6:46089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7216a2ddaa204becba7458a8524cd7a4 is 69, key is 911db94732f6,34321,1733213176054/rs:state/1733213177124/Put/seqid=0
2024-12-03T08:06:19,541 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,541 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T08:06:19,543 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-460219697_22 at /127.0.0.1:57034 [Receiving block BP-1083000352-172.17.0.2-1733213172129:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:39715:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57034 dst: /127.0.0.1:39715
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T08:06:19,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_-9223372036854775536_1039 (size=5294)
2024-12-03T08:06:19,547 WARN [M:0;911db94732f6:46089 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-03T08:06:19,547 INFO [M:0;911db94732f6:46089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7216a2ddaa204becba7458a8524cd7a4
2024-12-03T08:06:19,556 DEBUG [M:0;911db94732f6:46089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aada4e65928e4ba7aec69ee63186a5bc as hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aada4e65928e4ba7aec69ee63186a5bc
2024-12-03T08:06:19,563 INFO [M:0;911db94732f6:46089 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aada4e65928e4ba7aec69ee63186a5bc, entries=8, sequenceid=72, filesize=5.5 K
2024-12-03T08:06:19,565 DEBUG [M:0;911db94732f6:46089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/67b4a622ee2f45c895bdf13154862a05 as hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/67b4a622ee2f45c895bdf13154862a05
2024-12-03T08:06:19,573 INFO [M:0;911db94732f6:46089 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/67b4a622ee2f45c895bdf13154862a05, entries=8, sequenceid=72, filesize=6.3 K
2024-12-03T08:06:19,575 DEBUG [M:0;911db94732f6:46089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7216a2ddaa204becba7458a8524cd7a4 as hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7216a2ddaa204becba7458a8524cd7a4
2024-12-03T08:06:19,582 INFO [M:0;911db94732f6:46089 {}] regionserver.HStore$StoreFlusherImpl(1990): Added
hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7216a2ddaa204becba7458a8524cd7a4, entries=3, sequenceid=72, filesize=5.2 K 2024-12-03T08:06:19,584 INFO [M:0;911db94732f6:46089 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=72, compaction requested=false 2024-12-03T08:06:19,585 INFO [M:0;911db94732f6:46089 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:19,585 DEBUG [M:0;911db94732f6:46089 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733213179454Disabling compacts and flushes for region at 1733213179454Disabling writes for close at 1733213179455 (+1 ms)Obtaining lock to block concurrent updates at 1733213179455Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733213179455Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733213179456 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733213179456Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733213179457 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733213179473 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733213179473Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733213179490 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733213179506 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733213179507 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733213179523 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733213179539 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733213179539Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6731aa4e: reopening flushed file at 1733213179554 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51ca0a77: reopening flushed file at 1733213179563 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a02c3b: reopening flushed file at 1733213179574 (+11 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=72, compaction requested=false at 1733213179584 (+10 ms)Writing region close event to WAL at 1733213179585 (+1 ms)Closed at 1733213179585 2024-12-03T08:06:19,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39715 is added to blk_1073741825_1011 (size=32665) 2024-12-03T08:06:19,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43251 is added to blk_1073741825_1011 (size=32665) 2024-12-03T08:06:19,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39291 is added to blk_1073741825_1011 (size=32665) 2024-12-03T08:06:19,591 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T08:06:19,591 INFO [M:0;911db94732f6:46089 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T08:06:19,591 INFO [M:0;911db94732f6:46089 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46089 2024-12-03T08:06:19,591 INFO [M:0;911db94732f6:46089 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T08:06:19,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T08:06:19,693 INFO [M:0;911db94732f6:46089 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T08:06:19,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46089-0x10152212f4e0000, quorum=127.0.0.1:57724, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T08:06:19,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T08:06:19,700 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T08:06:19,700 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T08:06:19,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T08:06:19,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir/,STOPPED} 2024-12-03T08:06:19,703 WARN [BP-1083000352-172.17.0.2-1733213172129 heartbeating to localhost/127.0.0.1:35153 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T08:06:19,703 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T08:06:19,703 WARN [BP-1083000352-172.17.0.2-1733213172129 heartbeating to localhost/127.0.0.1:35153 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1083000352-172.17.0.2-1733213172129 (Datanode Uuid 5c76adae-747e-4c0c-a478-02fbf3eb27c1) service to localhost/127.0.0.1:35153 2024-12-03T08:06:19,703 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T08:06:19,704 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data5/current/BP-1083000352-172.17.0.2-1733213172129 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T08:06:19,704 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data6/current/BP-1083000352-172.17.0.2-1733213172129 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T08:06:19,705 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T08:06:19,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T08:06:19,707 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T08:06:19,707 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T08:06:19,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T08:06:19,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir/,STOPPED} 2024-12-03T08:06:19,708 WARN [BP-1083000352-172.17.0.2-1733213172129 heartbeating to localhost/127.0.0.1:35153 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T08:06:19,709 WARN [BP-1083000352-172.17.0.2-1733213172129 heartbeating to localhost/127.0.0.1:35153 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1083000352-172.17.0.2-1733213172129 (Datanode Uuid 21ef23be-79ca-4047-afc8-056a91c35486) service to localhost/127.0.0.1:35153 2024-12-03T08:06:19,708 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T08:06:19,709 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T08:06:19,709 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data3/current/BP-1083000352-172.17.0.2-1733213172129 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T08:06:19,709 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data4/current/BP-1083000352-172.17.0.2-1733213172129 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T08:06:19,710 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T08:06:19,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T08:06:19,712 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T08:06:19,712 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T08:06:19,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T08:06:19,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir/,STOPPED} 2024-12-03T08:06:19,713 WARN [BP-1083000352-172.17.0.2-1733213172129 heartbeating to localhost/127.0.0.1:35153 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T08:06:19,713 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T08:06:19,713 WARN [BP-1083000352-172.17.0.2-1733213172129 heartbeating to localhost/127.0.0.1:35153 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1083000352-172.17.0.2-1733213172129 (Datanode Uuid 1185b3ea-d0c6-4ebc-9f85-37b3c68f4cfc) service to localhost/127.0.0.1:35153 2024-12-03T08:06:19,713 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T08:06:19,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data1/current/BP-1083000352-172.17.0.2-1733213172129 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T08:06:19,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/cluster_176507d3-f554-c6fb-d45a-c2866ec12521/data/data2/current/BP-1083000352-172.17.0.2-1733213172129 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T08:06:19,714 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T08:06:19,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T08:06:19,723 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T08:06:19,723 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T08:06:19,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T08:06:19,724 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir/,STOPPED} 2024-12-03T08:06:19,733 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T08:06:19,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T08:06:19,767 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=90 (was 161), OpenFileDescriptor=437 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=69 (was 40) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8572 (was 8851) 2024-12-03T08:06:19,773 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=90, OpenFileDescriptor=437, MaxFileDescriptor=1048576, SystemLoadAverage=69, ProcessCount=11, AvailableMemoryMB=8571 2024-12-03T08:06:19,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T08:06:19,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.log.dir so I do NOT create it in target/test-data/5a41578b-e884-5793-a885-12779435a8f0 2024-12-03T08:06:19,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd314fe2-56e3-2c6c-d381-1b003466a961/hadoop.tmp.dir so I do NOT create it in target/test-data/5a41578b-e884-5793-a885-12779435a8f0 2024-12-03T08:06:19,773 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1, deleteOnExit=true 2024-12-03T08:06:19,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T08:06:19,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/test.cache.data in system properties and HBase conf 2024-12-03T08:06:19,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T08:06:19,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.log.dir in system properties and HBase conf 2024-12-03T08:06:19,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T08:06:19,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T08:06:19,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T08:06:19,774 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T08:06:19,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/nfs.dump.dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/java.io.tmpdir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T08:06:19,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T08:06:19,857 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T08:06:19,862 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T08:06:19,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T08:06:19,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T08:06:19,863 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T08:06:19,864 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T08:06:19,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e58533{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.log.dir/,AVAILABLE} 2024-12-03T08:06:19,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d952814{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T08:06:19,981 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41ad60e4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/java.io.tmpdir/jetty-localhost-44905-hadoop-hdfs-3_4_1-tests_jar-_-any-10550849778804551098/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T08:06:19,982 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4cf39bc8{HTTP/1.1, (http/1.1)}{localhost:44905} 2024-12-03T08:06:19,982 INFO [Time-limited test {}] server.Server(415): Started @9752ms 2024-12-03T08:06:20,057 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T08:06:20,061 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T08:06:20,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T08:06:20,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T08:06:20,062 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T08:06:20,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72f96008{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.log.dir/,AVAILABLE} 2024-12-03T08:06:20,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45b09adf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T08:06:20,178 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16eaa68d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/java.io.tmpdir/jetty-localhost-36245-hadoop-hdfs-3_4_1-tests_jar-_-any-6128583023178025900/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T08:06:20,179 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@9885f6c{HTTP/1.1, (http/1.1)}{localhost:36245} 2024-12-03T08:06:20,179 INFO [Time-limited test {}] server.Server(415): Started @9950ms 2024-12-03T08:06:20,181 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T08:06:20,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T08:06:20,217 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T08:06:20,220 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T08:06:20,220 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T08:06:20,220 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T08:06:20,221 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55cf3a01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.log.dir/,AVAILABLE} 2024-12-03T08:06:20,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e4c23ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T08:06:20,262 WARN [Thread-502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data1/current/BP-1691174847-172.17.0.2-1733213179807/current, will proceed with Du for space computation calculation, 2024-12-03T08:06:20,262 WARN [Thread-503 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data2/current/BP-1691174847-172.17.0.2-1733213179807/current, will proceed with Du for space computation calculation, 2024-12-03T08:06:20,287 WARN [Thread-481 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T08:06:20,290 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8938f5dc27e346b0 with lease ID 0x29fdefdb6ea08767: Processing first storage report for DS-5727e0fc-77e7-4511-8417-00017b3c5375 from datanode DatanodeRegistration(127.0.0.1:40797, datanodeUuid=ec9f2476-cbd7-4809-ad1c-193149059eb9, infoPort=43431, infoSecurePort=0, ipcPort=42471, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807) 2024-12-03T08:06:20,290 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8938f5dc27e346b0 with lease ID 0x29fdefdb6ea08767: from storage DS-5727e0fc-77e7-4511-8417-00017b3c5375 node DatanodeRegistration(127.0.0.1:40797, datanodeUuid=ec9f2476-cbd7-4809-ad1c-193149059eb9, infoPort=43431, infoSecurePort=0, ipcPort=42471, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T08:06:20,290 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8938f5dc27e346b0 with lease ID 0x29fdefdb6ea08767: Processing first storage report for DS-1cb34a3e-00b4-4b51-bc52-38106def0026 from datanode DatanodeRegistration(127.0.0.1:40797, datanodeUuid=ec9f2476-cbd7-4809-ad1c-193149059eb9, infoPort=43431, infoSecurePort=0, ipcPort=42471, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807) 2024-12-03T08:06:20,290 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8938f5dc27e346b0 with lease ID 0x29fdefdb6ea08767: from storage DS-1cb34a3e-00b4-4b51-bc52-38106def0026 node DatanodeRegistration(127.0.0.1:40797, datanodeUuid=ec9f2476-cbd7-4809-ad1c-193149059eb9, infoPort=43431, infoSecurePort=0, ipcPort=42471, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T08:06:20,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a04b23b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/java.io.tmpdir/jetty-localhost-34115-hadoop-hdfs-3_4_1-tests_jar-_-any-6240269756731687478/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T08:06:20,339 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ab72a47{HTTP/1.1, (http/1.1)}{localhost:34115} 2024-12-03T08:06:20,339 INFO [Time-limited test {}] server.Server(415): Started @10109ms 2024-12-03T08:06:20,340 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T08:06:20,368 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T08:06:20,371 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T08:06:20,372 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T08:06:20,372 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T08:06:20,372 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T08:06:20,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40b03519{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.log.dir/,AVAILABLE} 2024-12-03T08:06:20,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38e5384{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T08:06:20,417 WARN [Thread-537 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data3/current/BP-1691174847-172.17.0.2-1733213179807/current, will proceed with Du for space computation calculation, 2024-12-03T08:06:20,417 WARN [Thread-538 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data4/current/BP-1691174847-172.17.0.2-1733213179807/current, will proceed with Du for space computation calculation, 2024-12-03T08:06:20,433 WARN [Thread-517 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T08:06:20,436 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec352006d0ea0801 with lease ID 0x29fdefdb6ea08768: Processing first storage report for DS-6e5dfbf3-364a-4a3a-afad-4f3ff086438b from datanode DatanodeRegistration(127.0.0.1:37929, datanodeUuid=cb3475ab-0227-4db2-a2f8-f4e454f5ea3d, infoPort=36213, infoSecurePort=0, ipcPort=37405, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807) 2024-12-03T08:06:20,436 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec352006d0ea0801 with lease ID 0x29fdefdb6ea08768: from storage DS-6e5dfbf3-364a-4a3a-afad-4f3ff086438b node DatanodeRegistration(127.0.0.1:37929, datanodeUuid=cb3475ab-0227-4db2-a2f8-f4e454f5ea3d, infoPort=36213, infoSecurePort=0, ipcPort=37405, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T08:06:20,436 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec352006d0ea0801 with lease ID 0x29fdefdb6ea08768: Processing first storage report for DS-e30acb57-e608-45ec-9cc2-66a09d7fdee5 from datanode DatanodeRegistration(127.0.0.1:37929, datanodeUuid=cb3475ab-0227-4db2-a2f8-f4e454f5ea3d, infoPort=36213, infoSecurePort=0, ipcPort=37405, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807) 2024-12-03T08:06:20,436 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec352006d0ea0801 with lease ID 0x29fdefdb6ea08768: from storage DS-e30acb57-e608-45ec-9cc2-66a09d7fdee5 node DatanodeRegistration(127.0.0.1:37929, datanodeUuid=cb3475ab-0227-4db2-a2f8-f4e454f5ea3d, infoPort=36213, infoSecurePort=0, ipcPort=37405, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T08:06:20,492 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@190e176c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/java.io.tmpdir/jetty-localhost-40235-hadoop-hdfs-3_4_1-tests_jar-_-any-48657387252404330/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T08:06:20,493 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14e9278c{HTTP/1.1, (http/1.1)}{localhost:40235} 2024-12-03T08:06:20,493 INFO [Time-limited test {}] server.Server(415): Started @10264ms 2024-12-03T08:06:20,495 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
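The "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ..., numRegionServers=3, ..., numDataNodes=3, ..., numZkServers=1, ...}" line earlier in this test records the requested topology, and the Jetty/datanode startups and first block reports above are the DFS side of that request coming up. A minimal sketch of how a test typically builds such an option, assuming the builder method names match the fields printed in that log line and that startMiniCluster accepts the option object as the classic utility does:

    // Hedged sketch: builder method names are inferred from the fields printed
    // in the StartMiniClusterOption{...} log line and may differ in detail.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.BeforeClass;

    public class MiniClusterStartupSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // one HMaster, as logged
            .numRegionServers(3)  // three region servers
            .numDataNodes(3)      // three HDFS datanodes
            .numZkServers(1)      // single MiniZooKeeperCluster server
            .build();
        // Brings up ZooKeeper, the mini DFS cluster and the HBase daemons,
        // producing the startup and block-report lines seen above.
        UTIL.startMiniCluster(option);
      }
    }
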
2024-12-03T08:06:20,579 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data6/current/BP-1691174847-172.17.0.2-1733213179807/current, will proceed with Du for space computation calculation, 2024-12-03T08:06:20,579 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data5/current/BP-1691174847-172.17.0.2-1733213179807/current, will proceed with Du for space computation calculation, 2024-12-03T08:06:20,602 WARN [Thread-552 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T08:06:20,605 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d2429beed74d10 with lease ID 0x29fdefdb6ea08769: Processing first storage report for DS-ff659f5e-79f2-4fbb-8d2c-04c8077ee1e1 from datanode DatanodeRegistration(127.0.0.1:41749, datanodeUuid=2209e16f-937f-4fe9-93e3-d44bcf16f7a2, infoPort=45875, infoSecurePort=0, ipcPort=35709, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807) 2024-12-03T08:06:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d2429beed74d10 with lease ID 0x29fdefdb6ea08769: from storage DS-ff659f5e-79f2-4fbb-8d2c-04c8077ee1e1 node DatanodeRegistration(127.0.0.1:41749, datanodeUuid=2209e16f-937f-4fe9-93e3-d44bcf16f7a2, infoPort=45875, infoSecurePort=0, ipcPort=35709, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T08:06:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d2429beed74d10 with lease ID 0x29fdefdb6ea08769: Processing first storage report for DS-eaef04b7-3793-448c-a2f3-171b377d9dbd from datanode DatanodeRegistration(127.0.0.1:41749, datanodeUuid=2209e16f-937f-4fe9-93e3-d44bcf16f7a2, infoPort=45875, infoSecurePort=0, ipcPort=35709, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807) 2024-12-03T08:06:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d2429beed74d10 with lease ID 0x29fdefdb6ea08769: from storage DS-eaef04b7-3793-448c-a2f3-171b377d9dbd node DatanodeRegistration(127.0.0.1:41749, datanodeUuid=2209e16f-937f-4fe9-93e3-d44bcf16f7a2, infoPort=45875, infoSecurePort=0, ipcPort=35709, storageInfo=lv=-57;cid=testClusterID;nsid=952041422;c=1733213179807), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T08:06:20,627 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0 2024-12-03T08:06:20,630 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/zookeeper_0, clientPort=49329, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T08:06:20,631 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49329 2024-12-03T08:06:20,631 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741825_1001 (size=7) 2024-12-03T08:06:20,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741825_1001 (size=7) 2024-12-03T08:06:20,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741825_1001 (size=7) 2024-12-03T08:06:20,647 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2 with version=8 2024-12-03T08:06:20,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35153/user/jenkins/test-data/65f73c47-e105-01b3-9695-4a4b3bf7d2fc/hbase-staging 2024-12-03T08:06:20,650 INFO [Time-limited test {}] client.ConnectionUtils(128): master/911db94732f6:0 server-side Connection retries=45 2024-12-03T08:06:20,650 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,650 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,650 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T08:06:20,650 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,650 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T08:06:20,650 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T08:06:20,650 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T08:06:20,651 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41243 2024-12-03T08:06:20,653 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41243 connecting to ZooKeeper ensemble=127.0.0.1:49329 2024-12-03T08:06:20,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:412430x0, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T08:06:20,658 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41243-0x101522147800000 connected 2024-12-03T08:06:20,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:20,676 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2, hbase.cluster.distributed=false 2024-12-03T08:06:20,678 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T08:06:20,678 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41243 2024-12-03T08:06:20,678 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41243 2024-12-03T08:06:20,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41243 2024-12-03T08:06:20,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41243 2024-12-03T08:06:20,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41243 2024-12-03T08:06:20,694 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/911db94732f6:0 server-side Connection retries=45 2024-12-03T08:06:20,694 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,694 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,694 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T08:06:20,694 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,694 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T08:06:20,694 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T08:06:20,694 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T08:06:20,695 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39943 2024-12-03T08:06:20,696 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39943 connecting to ZooKeeper ensemble=127.0.0.1:49329 2024-12-03T08:06:20,697 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,699 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399430x0, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T08:06:20,703 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:399430x0, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:20,703 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39943-0x101522147800001 connected 2024-12-03T08:06:20,704 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T08:06:20,704 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T08:06:20,705 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T08:06:20,706 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T08:06:20,706 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39943 2024-12-03T08:06:20,706 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39943 2024-12-03T08:06:20,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39943 2024-12-03T08:06:20,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39943 2024-12-03T08:06:20,707 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39943 2024-12-03T08:06:20,722 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/911db94732f6:0 server-side Connection retries=45 2024-12-03T08:06:20,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,722 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T08:06:20,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T08:06:20,723 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T08:06:20,723 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T08:06:20,723 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38851 2024-12-03T08:06:20,724 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38851 connecting to ZooKeeper ensemble=127.0.0.1:49329 2024-12-03T08:06:20,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:388510x0, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T08:06:20,731 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38851-0x101522147800002 connected 2024-12-03T08:06:20,731 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:20,731 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T08:06:20,732 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T08:06:20,733 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
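The repeated "Instantiated default.FPBQ.Fifo ... handlerCount=3" and "Started handlerCount=3 ..." lines show each master and region server coming up with a deliberately small RPC handler pool. The log does not show how the test arrives at those numbers; a hypothetical way to shrink the pool for a single-JVM test is to lower hbase.regionserver.handler.count in the configuration before starting the minicluster:

    // Hypothetical illustration only; the log does not show the test's actual
    // configuration. hbase.regionserver.handler.count is the standard knob for
    // the RPC handler pool size (default 30).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRpcHandlerPoolSketch {
      public static Configuration testConf() {
        Configuration conf = HBaseConfiguration.create();
        // Three handlers per call queue keeps a minicluster test lightweight.
        conf.setInt("hbase.regionserver.handler.count", 3);
        return conf;
      }
    }
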
2024-12-03T08:06:20,734 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T08:06:20,734 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38851 2024-12-03T08:06:20,734 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38851 2024-12-03T08:06:20,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38851 2024-12-03T08:06:20,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38851 2024-12-03T08:06:20,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38851 2024-12-03T08:06:20,749 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/911db94732f6:0 server-side Connection retries=45 2024-12-03T08:06:20,749 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,749 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,749 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T08:06:20,749 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T08:06:20,749 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T08:06:20,749 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T08:06:20,749 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T08:06:20,750 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39409 2024-12-03T08:06:20,751 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39409 connecting to ZooKeeper ensemble=127.0.0.1:49329 2024-12-03T08:06:20,752 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:394090x0, quorum=127.0.0.1:49329, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T08:06:20,758 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39409-0x101522147800003 connected 2024-12-03T08:06:20,758 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:20,758 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T08:06:20,759 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T08:06:20,760 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T08:06:20,761 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T08:06:20,761 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39409 2024-12-03T08:06:20,761 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39409 2024-12-03T08:06:20,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39409 2024-12-03T08:06:20,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39409 2024-12-03T08:06:20,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39409 2024-12-03T08:06:20,773 DEBUG [M:0;911db94732f6:41243 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;911db94732f6:41243 2024-12-03T08:06:20,773 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/911db94732f6,41243,1733213180649 2024-12-03T08:06:20,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:20,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:20,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:20,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:20,776 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41243-0x101522147800000, 
quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/911db94732f6,41243,1733213180649 2024-12-03T08:06:20,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T08:06:20,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T08:06:20,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T08:06:20,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,778 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T08:06:20,779 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/911db94732f6,41243,1733213180649 from backup master directory 2024-12-03T08:06:20,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/911db94732f6,41243,1733213180649 2024-12-03T08:06:20,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:20,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:20,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:20,780 WARN [master/911db94732f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable 
HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T08:06:20,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T08:06:20,780 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=911db94732f6,41243,1733213180649 2024-12-03T08:06:20,787 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/hbase.id] with ID: 18777d6e-208f-4ef7-ba20-af51ba08c46a 2024-12-03T08:06:20,787 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/.tmp/hbase.id 2024-12-03T08:06:20,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741826_1002 (size=42) 2024-12-03T08:06:20,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741826_1002 (size=42) 2024-12-03T08:06:20,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741826_1002 (size=42) 2024-12-03T08:06:20,798 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/.tmp/hbase.id]:[hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/hbase.id] 2024-12-03T08:06:20,815 INFO [master/911db94732f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T08:06:20,815 INFO [master/911db94732f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T08:06:20,817 INFO [master/911db94732f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
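Annotation: the FSUtils entries above (create the cluster ID file, write it to a temporary location, then move it to its target) describe a write-to-temp-then-rename publish step. The sketch below reproduces that pattern with the Hadoop FileSystem API against the test's hdfs://localhost:40423 namenode; the class name, shortened paths and error handling are illustrative only, not the actual FSUtils code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40423");
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");   // temporary location (illustrative path)
    Path dst = new Path("/user/jenkins/test-data/hbase.id");        // final location (illustrative path)
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      // write the generated cluster id to the temp file first
      out.write("18777d6e-208f-4ef7-ba20-af51ba08c46a".getBytes(StandardCharsets.UTF_8));
    }
    // then move it into place so readers never observe a partially written file
    if (!fs.rename(tmp, dst)) {
      throw new IOException("failed to move " + tmp + " to " + dst);
    }
  }
}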
2024-12-03T08:06:20,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741827_1003 (size=196) 2024-12-03T08:06:20,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741827_1003 (size=196) 2024-12-03T08:06:20,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741827_1003 (size=196) 2024-12-03T08:06:20,831 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T08:06:20,832 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T08:06:20,832 INFO [master/911db94732f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T08:06:20,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is 
added to blk_1073741828_1004 (size=1189) 2024-12-03T08:06:20,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741828_1004 (size=1189) 2024-12-03T08:06:20,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741828_1004 (size=1189) 2024-12-03T08:06:20,845 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store 2024-12-03T08:06:20,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741829_1005 (size=34) 2024-12-03T08:06:20,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741829_1005 (size=34) 2024-12-03T08:06:20,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741829_1005 (size=34) 2024-12-03T08:06:20,855 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:20,855 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T08:06:20,855 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:20,855 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
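Annotation: the MasterRegion/HRegion entries above spell out the 'master:store' table descriptor (families info, proc, rs, state with VERSIONS, BLOOMFILTER, IN_MEMORY, BLOCKSIZE and encoding attributes). Assuming the standard HBase 2.x descriptor builders, a comparable descriptor for just the info family could be assembled as sketched below; this is not the MasterRegion bootstrap code itself.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // mirrors the logged attributes of the 'info' family:
    // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true, BLOCKSIZE=8192, DATA_BLOCK_ENCODING=ROW_INDEX_V1
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(info)
        .build();
  }
}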
2024-12-03T08:06:20,855 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T08:06:20,856 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:20,856 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:20,856 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733213180855Disabling compacts and flushes for region at 1733213180855Disabling writes for close at 1733213180855Writing region close event to WAL at 1733213180856 (+1 ms)Closed at 1733213180856 2024-12-03T08:06:20,857 WARN [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/.initializing 2024-12-03T08:06:20,857 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/WALs/911db94732f6,41243,1733213180649 2024-12-03T08:06:20,861 INFO [master/911db94732f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C41243%2C1733213180649, suffix=, logDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/WALs/911db94732f6,41243,1733213180649, archiveDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/oldWALs, maxLogs=10 2024-12-03T08:06:20,862 INFO [master/911db94732f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 911db94732f6%2C41243%2C1733213180649.1733213180861 2024-12-03T08:06:20,871 INFO [master/911db94732f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/WALs/911db94732f6,41243,1733213180649/911db94732f6%2C41243%2C1733213180649.1733213180861 2024-12-03T08:06:20,873 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45875:45875),(127.0.0.1/127.0.0.1:43431:43431),(127.0.0.1/127.0.0.1:36213:36213)] 2024-12-03T08:06:20,876 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T08:06:20,877 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:20,877 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,877 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,881 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T08:06:20,882 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:20,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:20,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T08:06:20,885 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:20,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T08:06:20,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T08:06:20,888 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:20,889 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T08:06:20,889 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T08:06:20,891 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:20,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T08:06:20,892 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,892 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,893 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,895 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,895 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,896 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T08:06:20,897 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T08:06:20,900 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T08:06:20,900 INFO [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75060352, jitterRate=0.11848640441894531}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T08:06:20,901 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733213180877Initializing all the Stores at 1733213180878 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213180878Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213180880 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213180881 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY 
=> 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213180881Cleaning up temporary data from old regions at 1733213180895 (+14 ms)Region opened successfully at 1733213180901 (+6 ms) 2024-12-03T08:06:20,901 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T08:06:20,906 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f434dce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=911db94732f6/172.17.0.2:0 2024-12-03T08:06:20,907 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T08:06:20,907 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T08:06:20,907 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T08:06:20,907 INFO [master/911db94732f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T08:06:20,907 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T08:06:20,908 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T08:06:20,908 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T08:06:20,910 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
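Annotation: the FlushLargeStoresPolicy message above says that with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the lower bound becomes the memstore flush size divided by the number of column families. The arithmetic for the values in this log (flushSize=134217728, four families) works out as below.

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 134_217_728L;                  // 128 MB, the logged flushSize
    int families = 4;                                       // info, proc, rs, state
    long perFamilyLowerBound = memstoreFlushSize / families;
    // 33554432 bytes = 32 MB, matching "(32.0 M)" and flushSizeLowerBound=33554432 above
    System.out.println(perFamilyLowerBound);
  }
}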
2024-12-03T08:06:20,911 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T08:06:20,913 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T08:06:20,913 INFO [master/911db94732f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T08:06:20,914 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T08:06:20,915 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T08:06:20,915 INFO [master/911db94732f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T08:06:20,916 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T08:06:20,917 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T08:06:20,918 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T08:06:20,919 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T08:06:20,921 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T08:06:20,922 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T08:06:20,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:20,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:20,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:20,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-03T08:06:20,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:20,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,925 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=911db94732f6,41243,1733213180649, sessionid=0x101522147800000, setting cluster-up flag (Was=false) 2024-12-03T08:06:20,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,933 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T08:06:20,934 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=911db94732f6,41243,1733213180649 2024-12-03T08:06:20,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:20,942 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T08:06:20,943 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=911db94732f6,41243,1733213180649 2024-12-03T08:06:20,944 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T08:06:20,947 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T08:06:20,947 INFO [master/911db94732f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T08:06:20,947 INFO [master/911db94732f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
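Annotation: several ZKUtil/RecoverableZooKeeper pairs above read optional configuration znodes (/hbase/balancer, /hbase/normalizer, /hbase/switch/*, /hbase/snapshot-cleanup) and note that a missing node is "not necessarily an error". A hedged sketch of that read-or-default pattern with the plain ZooKeeper client API follows; readOrNull is an illustrative helper, not an HBase method.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class OptionalZNodeSketch {
  /** Returns the znode data, or null when the node simply does not exist. */
  static byte[] readOrNull(ZooKeeper zk, String path) throws KeeperException, InterruptedException {
    try {
      return zk.getData(path, false, null);               // no watch, no Stat needed
    } catch (KeeperException.NoNodeException e) {
      return null;                                        // absence means "fall back to the default"
    }
  }
}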
2024-12-03T08:06:20,948 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 911db94732f6,41243,1733213180649 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T08:06:20,949 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/911db94732f6:0, corePoolSize=5, maxPoolSize=5 2024-12-03T08:06:20,949 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/911db94732f6:0, corePoolSize=5, maxPoolSize=5 2024-12-03T08:06:20,949 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/911db94732f6:0, corePoolSize=5, maxPoolSize=5 2024-12-03T08:06:20,949 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/911db94732f6:0, corePoolSize=5, maxPoolSize=5 2024-12-03T08:06:20,949 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/911db94732f6:0, corePoolSize=10, maxPoolSize=10 2024-12-03T08:06:20,949 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:20,949 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/911db94732f6:0, corePoolSize=2, maxPoolSize=2 2024-12-03T08:06:20,950 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:20,952 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T08:06:20,952 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T08:06:20,953 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:20,953 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T08:06:20,960 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733213210960 2024-12-03T08:06:20,960 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T08:06:20,961 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T08:06:20,961 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T08:06:20,961 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T08:06:20,961 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T08:06:20,961 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T08:06:20,961 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
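Annotation: the ChoreService line above enables the LogsCleaner chore with period=600000 ms. Conceptually that is a fixed-rate background task; a plain-JDK equivalent (not the HBase ChoreService itself, and with an empty illustrative task body) looks like this.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LogsCleanerChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
    Runnable logsCleaner = () -> {
      // scan the oldWALs directory and delete files every cleaner delegate agrees are expendable
    };
    // period=600000, unit=MILLISECONDS, as in the ScheduledChore entry above
    chorePool.scheduleAtFixedRate(logsCleaner, 600_000L, 600_000L, TimeUnit.MILLISECONDS);
  }
}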
2024-12-03T08:06:20,961 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T08:06:20,962 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T08:06:20,962 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T08:06:20,965 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T08:06:20,965 INFO [master/911db94732f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T08:06:20,966 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/911db94732f6:0:becomeActiveMaster-HFileCleaner.large.0-1733213180965,5,FailOnTimeoutGroup] 2024-12-03T08:06:20,966 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/911db94732f6:0:becomeActiveMaster-HFileCleaner.small.0-1733213180966,5,FailOnTimeoutGroup] 2024-12-03T08:06:20,966 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:20,966 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T08:06:20,966 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:20,966 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
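Annotation: the HFileCleaner lines above start separate "large" and "small" file worker threads, so archived files are presumably routed to one of two queues by size. The sketch below shows only that routing idea, with a made-up threshold and plain String paths; it is not the HFileCleaner implementation.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class HFileCleanerQueueSketch {
  // illustrative threshold; the real cleaner takes its cut-off from configuration
  static final long LARGE_FILE_THRESHOLD = 1L << 20;       // 1 MB
  final BlockingQueue<String> largeQueue = new LinkedBlockingQueue<>();
  final BlockingQueue<String> smallQueue = new LinkedBlockingQueue<>();

  void enqueue(String path, long length) throws InterruptedException {
    if (length > LARGE_FILE_THRESHOLD) {
      largeQueue.put(path);    // handled by the .large.* worker thread
    } else {
      smallQueue.put(path);    // handled by the .small.* worker thread
    }
  }
}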
2024-12-03T08:06:20,967 INFO [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(746): ClusterId : 18777d6e-208f-4ef7-ba20-af51ba08c46a 2024-12-03T08:06:20,967 DEBUG [RS:0;911db94732f6:39943 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T08:06:20,967 INFO [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(746): ClusterId : 18777d6e-208f-4ef7-ba20-af51ba08c46a 2024-12-03T08:06:20,967 DEBUG [RS:1;911db94732f6:38851 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T08:06:20,968 INFO [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(746): ClusterId : 18777d6e-208f-4ef7-ba20-af51ba08c46a 2024-12-03T08:06:20,968 DEBUG [RS:2;911db94732f6:39409 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T08:06:20,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741831_1007 (size=1321) 2024-12-03T08:06:20,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741831_1007 (size=1321) 2024-12-03T08:06:20,971 DEBUG [RS:0;911db94732f6:39943 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T08:06:20,971 DEBUG [RS:0;911db94732f6:39943 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T08:06:20,971 DEBUG [RS:1;911db94732f6:38851 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T08:06:20,971 DEBUG [RS:1;911db94732f6:38851 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T08:06:20,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741831_1007 (size=1321) 2024-12-03T08:06:20,973 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T08:06:20,973 DEBUG [RS:2;911db94732f6:39409 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T08:06:20,973 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2 2024-12-03T08:06:20,973 DEBUG [RS:2;911db94732f6:39409 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T08:06:20,974 DEBUG [RS:1;911db94732f6:38851 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T08:06:20,974 DEBUG [RS:1;911db94732f6:38851 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c25ecf9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=911db94732f6/172.17.0.2:0 2024-12-03T08:06:20,975 DEBUG [RS:0;911db94732f6:39943 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T08:06:20,975 DEBUG [RS:0;911db94732f6:39943 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184a4c80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=911db94732f6/172.17.0.2:0 2024-12-03T08:06:20,976 DEBUG [RS:2;911db94732f6:39409 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T08:06:20,976 DEBUG [RS:2;911db94732f6:39409 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b16eb61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=911db94732f6/172.17.0.2:0 2024-12-03T08:06:20,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741832_1008 (size=32) 2024-12-03T08:06:20,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741832_1008 (size=32) 2024-12-03T08:06:20,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741832_1008 (size=32) 2024-12-03T08:06:20,991 DEBUG [RS:1;911db94732f6:38851 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;911db94732f6:38851 2024-12-03T08:06:20,991 INFO [RS:1;911db94732f6:38851 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T08:06:20,991 INFO [RS:1;911db94732f6:38851 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T08:06:20,991 DEBUG [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(832): About to register with Master. 
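Annotation: the ShutdownHook entries above and below ("Installed shutdown hook thread: Shutdownhook:RS:...") install per-process JVM shutdown hooks. The pure-JDK mechanism behind that is Runtime.addShutdownHook; a minimal example with an illustrative cleanup body follows.

public class ShutdownHookSketch {
  public static void main(String[] args) {
    Thread hook = new Thread(() -> {
      // close region server resources before the JVM exits (illustrative body)
    }, "Shutdownhook:RS:1;911db94732f6:38851");
    Runtime.getRuntime().addShutdownHook(hook);
  }
}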
2024-12-03T08:06:20,991 INFO [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(2659): reportForDuty to master=911db94732f6,41243,1733213180649 with port=38851, startcode=1733213180722 2024-12-03T08:06:20,992 DEBUG [RS:1;911db94732f6:38851 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T08:06:20,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:20,994 DEBUG [RS:0;911db94732f6:39943 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;911db94732f6:39943 2024-12-03T08:06:20,994 INFO [RS:0;911db94732f6:39943 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T08:06:20,994 INFO [RS:0;911db94732f6:39943 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T08:06:20,994 DEBUG [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T08:06:20,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T08:06:20,995 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38187, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T08:06:20,995 INFO [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(2659): reportForDuty to master=911db94732f6,41243,1733213180649 with port=39943, startcode=1733213180694 2024-12-03T08:06:20,995 DEBUG [RS:0;911db94732f6:39943 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T08:06:20,996 DEBUG [RS:2;911db94732f6:39409 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;911db94732f6:39409 2024-12-03T08:06:20,996 INFO [RS:2;911db94732f6:39409 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T08:06:20,996 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41243 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 911db94732f6,38851,1733213180722 2024-12-03T08:06:20,996 INFO [RS:2;911db94732f6:39409 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T08:06:20,996 DEBUG [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(832): About to register with Master. 
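Annotation: the reportForDuty entries above register each region server with the master using its host, port and startcode, which is why server names print as host,port,startcode (e.g. 911db94732f6,38851,1733213180722). Assuming the public ServerName.valueOf(host, port, startcode) factory from the HBase client API, that identity can be built as below.

import org.apache.hadoop.hbase.ServerName;

public class ServerNameSketch {
  public static void main(String[] args) {
    // host, RPC port and startcode (the server's start timestamp) taken from the log above
    ServerName rs1 = ServerName.valueOf("911db94732f6", 38851, 1733213180722L);
    System.out.println(rs1.getServerName());   // prints 911db94732f6,38851,1733213180722
  }
}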
2024-12-03T08:06:20,996 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41243 {}] master.ServerManager(517): Registering regionserver=911db94732f6,38851,1733213180722 2024-12-03T08:06:20,996 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T08:06:20,997 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:20,997 INFO [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(2659): reportForDuty to master=911db94732f6,41243,1733213180649 with port=39409, startcode=1733213180749 2024-12-03T08:06:20,997 DEBUG [RS:2;911db94732f6:39409 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T08:06:20,999 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43261, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T08:06:20,999 DEBUG [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2 2024-12-03T08:06:20,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:20,999 DEBUG [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40423 2024-12-03T08:06:20,999 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52349, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T08:06:20,999 DEBUG [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T08:06:20,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T08:06:20,999 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41243 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 911db94732f6,39943,1733213180694 2024-12-03T08:06:20,999 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41243 {}] master.ServerManager(517): Registering regionserver=911db94732f6,39943,1733213180694 2024-12-03T08:06:21,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T08:06:21,001 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T08:06:21,001 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:21,001 DEBUG [RS:1;911db94732f6:38851 {}] zookeeper.ZKUtil(111): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/911db94732f6,38851,1733213180722 2024-12-03T08:06:21,001 WARN [RS:1;911db94732f6:38851 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T08:06:21,001 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41243 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 911db94732f6,39409,1733213180749 2024-12-03T08:06:21,001 INFO [RS:1;911db94732f6:38851 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T08:06:21,002 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41243 {}] master.ServerManager(517): Registering regionserver=911db94732f6,39409,1733213180749 2024-12-03T08:06:21,002 DEBUG [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,38851,1733213180722 2024-12-03T08:06:21,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:21,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T08:06:21,002 DEBUG [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2 2024-12-03T08:06:21,002 DEBUG [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40423 2024-12-03T08:06:21,002 DEBUG [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T08:06:21,003 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding 
[911db94732f6,38851,1733213180722] 2024-12-03T08:06:21,004 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T08:06:21,004 DEBUG [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2 2024-12-03T08:06:21,004 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:21,004 DEBUG [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40423 2024-12-03T08:06:21,004 DEBUG [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T08:06:21,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:21,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T08:06:21,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T08:06:21,007 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T08:06:21,007 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:21,008 DEBUG [RS:0;911db94732f6:39943 {}] zookeeper.ZKUtil(111): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on existing 
znode=/hbase/rs/911db94732f6,39943,1733213180694 2024-12-03T08:06:21,008 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [911db94732f6,39943,1733213180694] 2024-12-03T08:06:21,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:21,008 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [911db94732f6,39409,1733213180749] 2024-12-03T08:06:21,008 WARN [RS:0;911db94732f6:39943 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T08:06:21,008 DEBUG [RS:2;911db94732f6:39409 {}] zookeeper.ZKUtil(111): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/911db94732f6,39409,1733213180749 2024-12-03T08:06:21,008 INFO [RS:0;911db94732f6:39943 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T08:06:21,008 WARN [RS:2;911db94732f6:39409 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T08:06:21,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T08:06:21,008 DEBUG [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,39943,1733213180694 2024-12-03T08:06:21,008 INFO [RS:2;911db94732f6:39409 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T08:06:21,009 DEBUG [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,39409,1733213180749 2024-12-03T08:06:21,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740 2024-12-03T08:06:21,010 INFO [RS:1;911db94732f6:38851 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T08:06:21,010 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740 2024-12-03T08:06:21,012 INFO [RS:1;911db94732f6:38851 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T08:06:21,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T08:06:21,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T08:06:21,013 INFO [RS:1;911db94732f6:38851 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T08:06:21,013 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, 
unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,013 INFO [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T08:06:21,013 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T08:06:21,014 INFO [RS:0;911db94732f6:39943 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T08:06:21,016 INFO [RS:0;911db94732f6:39943 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T08:06:21,016 INFO [RS:2;911db94732f6:39409 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T08:06:21,016 INFO [RS:0;911db94732f6:39943 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T08:06:21,016 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,017 INFO [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T08:06:21,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T08:06:21,018 INFO [RS:0;911db94732f6:39943 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T08:06:21,018 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
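The registration traffic above (the master at 41243 receiving NodeChildrenChanged on /hbase/rs, each region server setting a watcher on its own znode under /hbase/rs/..., and RegionServerTracker adding the new ephemeral nodes) is all visible directly in ZooKeeper. A minimal sketch for inspecting it, assuming the quorum address the log reports (127.0.0.1:49329) and the default /hbase base znode; this is an illustrative probe, not part of the test itself:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServerZNodes {
        public static void main(String[] args) throws Exception {
            // Connect string taken from the log lines above (quorum=127.0.0.1:49329, baseZNode=/hbase).
            ZooKeeper zk = new ZooKeeper("127.0.0.1:49329", 30_000, event -> { });
            // Each live region server holds an ephemeral child here; expect entries like
            // 911db94732f6,38851,1733213180722, the same names ServerManager logs on registration.
            List<String> servers = zk.getChildren("/hbase/rs", false);
            servers.forEach(System.out::println);
            zk.close();
        }
    }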
2024-12-03T08:06:21,018 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,018 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,018 INFO [RS:2;911db94732f6:39409 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T08:06:21,018 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,018 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,018 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,019 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/911db94732f6:0, corePoolSize=2, maxPoolSize=2 2024-12-03T08:06:21,019 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,019 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,019 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,019 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,019 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,019 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,019 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:21,019 DEBUG [RS:0;911db94732f6:39943 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:21,019 INFO [RS:2;911db94732f6:39409 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T08:06:21,019 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
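The block of "Starting executor service name=RS_..." lines above shows the region server creating one small, named, fixed-size thread pool per event type (open region, open meta, close region, log replay, snapshot and flush operations), with the corePoolSize/maxPoolSize it will use printed for each. A rough JDK-level sketch of that pattern, purely as an illustration and not HBase's own executor.ExecutorService code:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class NamedPoolSketch {
        public static void main(String[] args) throws Exception {
            // Named daemon threads, mirroring the RS_* naming in the log (name chosen for illustration).
            ThreadFactory tf = r -> {
                Thread t = new Thread(r, "RS_OPEN_REGION-regionserver-worker");
                t.setDaemon(true);
                return t;
            };
            ExecutorService openRegionPool = new ThreadPoolExecutor(
                    1, 1,                        // corePoolSize=1, maxPoolSize=1, as logged for RS_OPEN_REGION
                    60L, TimeUnit.SECONDS,       // keep-alive for idle threads above core (unused when core == max)
                    new LinkedBlockingQueue<>(), // queue of pending handlers for this event type
                    tf);
            openRegionPool.submit(() -> System.out.println("an open-region handler would run here"));
            openRegionPool.shutdown();
            openRegionPool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }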
2024-12-03T08:06:21,020 INFO [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T08:06:21,020 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T08:06:21,020 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,020 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,021 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,021 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,021 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60922861, jitterRate=-0.09217862784862518}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T08:06:21,021 INFO [RS:2;911db94732f6:39409 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T08:06:21,021 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,021 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,021 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,39943,1733213180694-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
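The HRegion(1114) line above prints the effective split and flush policies for region 1588230740, and two of its numbers can be reconstructed from values elsewhere in the log, under stated assumptions:

    desiredMaxFileSize = base max file size x (1 + jitterRate): 67108864 x (1 - 0.09217862784862518) = 60922861,
    which fits a 64 MB base; the later re-open of the same region (jitterRate=0.10478606820106506,
    desiredMaxFileSize=74140938) fits the same 64 MB base.

    flushSizeLowerBound = memstore flush size / number of families: 134217728 / 4 = 33554432,
    i.e. the 32.0 M fallback FlushLargeStoresPolicy(65) logs for hbase:meta's four families
    (info, ns, rep_barrier, table).

The 64 MB base file size and the 128 MB flush size are inferences consistent with these figures, not values printed directly in this excerpt.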
2024-12-03T08:06:21,021 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,021 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,021 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,021 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,021 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,022 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/911db94732f6:0, corePoolSize=2, maxPoolSize=2 2024-12-03T08:06:21,022 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733213180993Initializing all the Stores at 1733213180994 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213180994Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213180995 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213180995Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213180995Cleaning up temporary data from old regions at 1733213181013 (+18 ms)Region opened successfully at 1733213181022 (+9 ms) 2024-12-03T08:06:21,022 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling 
compactions & flushes 2024-12-03T08:06:21,022 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T08:06:21,022 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T08:06:21,022 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T08:06:21,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T08:06:21,022 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,022 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,022 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:21,022 DEBUG [RS:2;911db94732f6:39409 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:21,022 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T08:06:21,023 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733213181022Disabling compacts and flushes for region at 1733213181022Disabling writes for close at 1733213181022Writing region close event to WAL at 1733213181022Closed at 1733213181022 2024-12-03T08:06:21,023 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,023 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,023 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,023 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,023 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,023 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,39409,1733213180749-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-03T08:06:21,024 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T08:06:21,024 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T08:06:21,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T08:06:21,025 INFO [RS:1;911db94732f6:38851 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T08:06:21,025 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/911db94732f6:0, corePoolSize=2, maxPoolSize=2 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,026 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/911db94732f6:0, corePoolSize=1, maxPoolSize=1 2024-12-03T08:06:21,027 DEBUG [RS:1;911db94732f6:38851 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:21,027 DEBUG [RS:1;911db94732f6:38851 {}] 
executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0, corePoolSize=3, maxPoolSize=3 2024-12-03T08:06:21,027 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T08:06:21,027 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,027 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,027 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,027 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,028 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,028 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,38851,1733213180722-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T08:06:21,029 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T08:06:21,040 INFO [RS:2;911db94732f6:39409 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T08:06:21,040 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,39409,1733213180749-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,041 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,041 INFO [RS:2;911db94732f6:39409 {}] regionserver.Replication(171): 911db94732f6,39409,1733213180749 started 2024-12-03T08:06:21,042 INFO [RS:1;911db94732f6:38851 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T08:06:21,042 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,38851,1733213180722-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,043 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,043 INFO [RS:1;911db94732f6:38851 {}] regionserver.Replication(171): 911db94732f6,38851,1733213180722 started 2024-12-03T08:06:21,043 INFO [RS:0;911db94732f6:39943 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T08:06:21,044 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,39943,1733213180694-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
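The MemStoreFlusher(131) lines earlier in the startup report globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M on every region server; the low-water mark is 0.95 x 880 M = 836 M, matching the usual lower-limit fraction of 0.95. The 880 M figure itself would correspond to the common 40%-of-heap default on a roughly 2.2 GB test JVM, but that heap size is an inference, not something this excerpt states.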
2024-12-03T08:06:21,044 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,044 INFO [RS:0;911db94732f6:39943 {}] regionserver.Replication(171): 911db94732f6,39943,1733213180694 started 2024-12-03T08:06:21,054 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,055 INFO [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(1482): Serving as 911db94732f6,39409,1733213180749, RpcServer on 911db94732f6/172.17.0.2:39409, sessionid=0x101522147800003 2024-12-03T08:06:21,055 DEBUG [RS:2;911db94732f6:39409 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T08:06:21,055 DEBUG [RS:2;911db94732f6:39409 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 911db94732f6,39409,1733213180749 2024-12-03T08:06:21,055 DEBUG [RS:2;911db94732f6:39409 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,39409,1733213180749' 2024-12-03T08:06:21,055 DEBUG [RS:2;911db94732f6:39409 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T08:06:21,055 DEBUG [RS:2;911db94732f6:39409 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T08:06:21,056 DEBUG [RS:2;911db94732f6:39409 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T08:06:21,056 DEBUG [RS:2;911db94732f6:39409 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T08:06:21,056 DEBUG [RS:2;911db94732f6:39409 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 911db94732f6,39409,1733213180749 2024-12-03T08:06:21,056 DEBUG [RS:2;911db94732f6:39409 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,39409,1733213180749' 2024-12-03T08:06:21,056 DEBUG [RS:2;911db94732f6:39409 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T08:06:21,056 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T08:06:21,057 INFO [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(1482): Serving as 911db94732f6,38851,1733213180722, RpcServer on 911db94732f6/172.17.0.2:38851, sessionid=0x101522147800002 2024-12-03T08:06:21,057 DEBUG [RS:1;911db94732f6:38851 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T08:06:21,057 DEBUG [RS:1;911db94732f6:38851 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 911db94732f6,38851,1733213180722 2024-12-03T08:06:21,057 DEBUG [RS:2;911db94732f6:39409 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T08:06:21,057 DEBUG [RS:1;911db94732f6:38851 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,38851,1733213180722' 2024-12-03T08:06:21,057 DEBUG [RS:1;911db94732f6:38851 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T08:06:21,057 DEBUG [RS:2;911db94732f6:39409 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T08:06:21,057 INFO [RS:2;911db94732f6:39409 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T08:06:21,057 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,057 INFO [RS:2;911db94732f6:39409 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T08:06:21,057 DEBUG [RS:1;911db94732f6:38851 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T08:06:21,057 INFO [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(1482): Serving as 911db94732f6,39943,1733213180694, RpcServer on 911db94732f6/172.17.0.2:39943, sessionid=0x101522147800001 2024-12-03T08:06:21,058 DEBUG [RS:0;911db94732f6:39943 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T08:06:21,058 DEBUG [RS:0;911db94732f6:39943 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 911db94732f6,39943,1733213180694 2024-12-03T08:06:21,058 DEBUG [RS:0;911db94732f6:39943 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,39943,1733213180694' 2024-12-03T08:06:21,058 DEBUG [RS:0;911db94732f6:39943 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T08:06:21,058 DEBUG [RS:1;911db94732f6:38851 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T08:06:21,058 DEBUG [RS:1;911db94732f6:38851 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T08:06:21,058 DEBUG [RS:1;911db94732f6:38851 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 911db94732f6,38851,1733213180722 2024-12-03T08:06:21,058 DEBUG [RS:1;911db94732f6:38851 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,38851,1733213180722' 2024-12-03T08:06:21,058 DEBUG [RS:1;911db94732f6:38851 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T08:06:21,058 DEBUG [RS:0;911db94732f6:39943 {}] procedure.ZKProcedureMemberRpcs(154): Looking for 
new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T08:06:21,059 DEBUG [RS:0;911db94732f6:39943 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T08:06:21,059 DEBUG [RS:0;911db94732f6:39943 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T08:06:21,059 DEBUG [RS:1;911db94732f6:38851 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T08:06:21,059 DEBUG [RS:0;911db94732f6:39943 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 911db94732f6,39943,1733213180694 2024-12-03T08:06:21,059 DEBUG [RS:0;911db94732f6:39943 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '911db94732f6,39943,1733213180694' 2024-12-03T08:06:21,059 DEBUG [RS:0;911db94732f6:39943 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T08:06:21,059 DEBUG [RS:1;911db94732f6:38851 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T08:06:21,059 DEBUG [RS:0;911db94732f6:39943 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T08:06:21,059 INFO [RS:1;911db94732f6:38851 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T08:06:21,059 INFO [RS:1;911db94732f6:38851 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T08:06:21,060 DEBUG [RS:0;911db94732f6:39943 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T08:06:21,060 INFO [RS:0;911db94732f6:39943 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T08:06:21,060 INFO [RS:0;911db94732f6:39943 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-03T08:06:21,160 INFO [RS:2;911db94732f6:39409 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C39409%2C1733213180749, suffix=, logDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,39409,1733213180749, archiveDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/oldWALs, maxLogs=32 2024-12-03T08:06:21,162 INFO [RS:1;911db94732f6:38851 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C38851%2C1733213180722, suffix=, logDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,38851,1733213180722, archiveDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/oldWALs, maxLogs=32 2024-12-03T08:06:21,162 INFO [RS:2;911db94732f6:39409 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 911db94732f6%2C39409%2C1733213180749.1733213181162 2024-12-03T08:06:21,162 INFO [RS:0;911db94732f6:39943 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C39943%2C1733213180694, suffix=, logDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,39943,1733213180694, archiveDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/oldWALs, maxLogs=32 2024-12-03T08:06:21,164 INFO [RS:0;911db94732f6:39943 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 911db94732f6%2C39943%2C1733213180694.1733213181164 2024-12-03T08:06:21,164 INFO [RS:1;911db94732f6:38851 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 911db94732f6%2C38851%2C1733213180722.1733213181164 2024-12-03T08:06:21,175 INFO [RS:2;911db94732f6:39409 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,39409,1733213180749/911db94732f6%2C39409%2C1733213180749.1733213181162 2024-12-03T08:06:21,177 DEBUG [RS:2;911db94732f6:39409 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43431:43431),(127.0.0.1/127.0.0.1:45875:45875),(127.0.0.1/127.0.0.1:36213:36213)] 2024-12-03T08:06:21,178 INFO [RS:0;911db94732f6:39943 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,39943,1733213180694/911db94732f6%2C39943%2C1733213180694.1733213181164 2024-12-03T08:06:21,178 INFO [RS:1;911db94732f6:38851 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,38851,1733213180722/911db94732f6%2C38851%2C1733213180722.1733213181164 2024-12-03T08:06:21,180 WARN [911db94732f6:41243 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
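The AbstractFSWAL(613) lines above show identical WAL settings on all three region servers: blocksize=256 MB, rollsize=128 MB (the roll threshold here is exactly half the block size, 256 MB x 0.5 = 128 MB), per-server log and archive directories under the test data root, and a cap of 32 live WAL files per server (maxLogs=32). The 0.5 ratio is an observation from these two numbers; the multiplier that produces it is configuration-dependent and is not printed in this excerpt.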
2024-12-03T08:06:21,187 DEBUG [RS:0;911db94732f6:39943 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43431:43431),(127.0.0.1/127.0.0.1:36213:36213),(127.0.0.1/127.0.0.1:45875:45875)] 2024-12-03T08:06:21,188 DEBUG [RS:1;911db94732f6:38851 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45875:45875),(127.0.0.1/127.0.0.1:43431:43431),(127.0.0.1/127.0.0.1:36213:36213)] 2024-12-03T08:06:21,430 DEBUG [911db94732f6:41243 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-03T08:06:21,430 DEBUG [911db94732f6:41243 {}] balancer.BalancerClusterState(204): Hosts are {911db94732f6=0} racks are {/default-rack=0} 2024-12-03T08:06:21,433 DEBUG [911db94732f6:41243 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T08:06:21,433 DEBUG [911db94732f6:41243 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T08:06:21,433 DEBUG [911db94732f6:41243 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T08:06:21,433 DEBUG [911db94732f6:41243 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T08:06:21,433 DEBUG [911db94732f6:41243 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T08:06:21,433 DEBUG [911db94732f6:41243 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T08:06:21,433 INFO [911db94732f6:41243 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T08:06:21,433 INFO [911db94732f6:41243 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T08:06:21,433 INFO [911db94732f6:41243 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T08:06:21,433 DEBUG [911db94732f6:41243 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T08:06:21,434 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=911db94732f6,39409,1733213180749 2024-12-03T08:06:21,436 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 911db94732f6,39409,1733213180749, state=OPENING 2024-12-03T08:06:21,438 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T08:06:21,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:21,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:21,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:21,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:21,440 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path 
/hbase/meta-region-server: CHANGED 2024-12-03T08:06:21,440 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:21,440 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T08:06:21,441 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:21,441 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=911db94732f6,39409,1733213180749}] 2024-12-03T08:06:21,441 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:21,595 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T08:06:21,596 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55017, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T08:06:21,602 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T08:06:21,602 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T08:06:21,605 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=911db94732f6%2C39409%2C1733213180749.meta, suffix=.meta, logDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,39409,1733213180749, archiveDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/oldWALs, maxLogs=32 2024-12-03T08:06:21,606 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 911db94732f6%2C39409%2C1733213180749.meta.1733213181605.meta 2024-12-03T08:06:21,613 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/WALs/911db94732f6,39409,1733213180749/911db94732f6%2C39409%2C1733213180749.meta.1733213181605.meta 2024-12-03T08:06:21,617 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43431:43431),(127.0.0.1/127.0.0.1:36213:36213),(127.0.0.1/127.0.0.1:45875:45875)] 2024-12-03T08:06:21,618 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T08:06:21,618 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T08:06:21,618 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T08:06:21,618 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T08:06:21,618 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T08:06:21,619 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:21,619 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T08:06:21,619 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T08:06:21,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T08:06:21,622 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T08:06:21,622 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:21,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:21,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T08:06:21,624 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T08:06:21,624 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:21,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:21,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T08:06:21,626 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T08:06:21,626 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:21,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:21,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T08:06:21,627 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T08:06:21,627 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:21,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T08:06:21,628 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T08:06:21,629 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740 2024-12-03T08:06:21,630 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740 2024-12-03T08:06:21,632 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T08:06:21,632 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T08:06:21,633 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
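The four CompactionConfiguration(183) lines above (one per meta column family) all print the same selection parameters: minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2 and off-peak ratio 5.0. A minimal sketch of reading the corresponding settings from an HBase Configuration follows; the key names are assumed to be the usual ones backing these values, and the defaults are chosen to mirror what this log reports:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigProbe {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key names; defaults mirror the logged values (3 files min, 10 max, ratio 1.2, off-peak 5.0).
            int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
            int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
            double ratio = conf.getDouble("hbase.hstore.compaction.ratio", 1.2);
            double offPeakRatio = conf.getDouble("hbase.hstore.compaction.ratio.offpeak", 5.0);
            long minCompactSize = conf.getLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
            System.out.printf("min=%d max=%d ratio=%.1f offPeak=%.1f minCompactSize=%d%n",
                    minFiles, maxFiles, ratio, offPeakRatio, minCompactSize);
        }
    }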
2024-12-03T08:06:21,634 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T08:06:21,635 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74140938, jitterRate=0.10478606820106506}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T08:06:21,635 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T08:06:21,636 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733213181619Writing region info on filesystem at 1733213181619Initializing all the Stores at 1733213181620 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213181620Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213181620Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213181620Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733213181620Cleaning up temporary data from old regions at 1733213181632 (+12 ms)Running coprocessor post-open hooks at 1733213181635 (+3 ms)Region opened successfully at 1733213181636 (+1 ms) 2024-12-03T08:06:21,638 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733213181595 2024-12-03T08:06:21,641 DEBUG [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T08:06:21,641 INFO [RS_OPEN_META-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T08:06:21,642 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=911db94732f6,39409,1733213180749 2024-12-03T08:06:21,644 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 911db94732f6,39409,1733213180749, state=OPEN 2024-12-03T08:06:21,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T08:06:21,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T08:06:21,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T08:06:21,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T08:06:21,646 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:21,646 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:21,646 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=911db94732f6,39409,1733213180749 2024-12-03T08:06:21,646 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:21,646 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T08:06:21,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T08:06:21,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=911db94732f6,39409,1733213180749 in 206 msec 2024-12-03T08:06:21,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T08:06:21,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 626 msec 2024-12-03T08:06:21,655 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T08:06:21,655 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T08:06:21,657 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T08:06:21,657 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=911db94732f6,39409,1733213180749, seqNum=-1] 2024-12-03T08:06:21,658 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T08:06:21,659 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40753, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T08:06:21,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 718 msec 2024-12-03T08:06:21,666 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733213181666, completionTime=-1 2024-12-03T08:06:21,666 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-03T08:06:21,666 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T08:06:21,668 INFO [master/911db94732f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-03T08:06:21,668 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733213241668 2024-12-03T08:06:21,668 INFO [master/911db94732f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733213301668 2024-12-03T08:06:21,668 INFO [master/911db94732f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T08:06:21,669 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,41243,1733213180649-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,669 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,41243,1733213180649-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,669 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,41243,1733213180649-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,669 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-911db94732f6:41243, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,669 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T08:06:21,669 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
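The PEWorker entries above fetch the hbase:meta region location from the connection registry and report it as [region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1]. A client-side way to resolve the same location is sketched below, assuming the standard synchronous Connection and RegionLocator API; the empty row key simply asks for the region covering the start of the table.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Asks the cluster where hbase:meta is hosted, comparable to the
      // "fetched meta region location" entries above.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
      System.out.println(loc.getServerName());
    }
  }
}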
2024-12-03T08:06:21,672 DEBUG [master/911db94732f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T08:06:21,674 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.894sec 2024-12-03T08:06:21,674 INFO [master/911db94732f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T08:06:21,674 INFO [master/911db94732f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T08:06:21,674 INFO [master/911db94732f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T08:06:21,674 INFO [master/911db94732f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T08:06:21,674 INFO [master/911db94732f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T08:06:21,674 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,41243,1733213180649-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T08:06:21,674 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,41243,1733213180649-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T08:06:21,677 DEBUG [master/911db94732f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T08:06:21,677 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T08:06:21,677 INFO [master/911db94732f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=911db94732f6,41243,1733213180649-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
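At this point the master reports that initialization completed and that all three region servers have checked in ("expected min=3 server(s)"), and the test client connects in the entries that follow. A minimal sketch of how such a three-region-server mini cluster is typically started from a test is below; startMiniCluster(3) is assumed to be the counterpart of the HBaseTestingUtil.shutdownMiniCluster call that appears in the teardown stack traces later in this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSetUpSketch {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  // Starts an in-process HBase cluster with 3 region servers, matching the
  // "Finished waiting on RegionServer count=3" entry above. The overload
  // taking the region-server count is an assumption.
  public static void setUp() throws Exception {
    UTIL.startMiniCluster(3);
  }
}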
2024-12-03T08:06:21,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61e12713, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T08:06:21,768 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 911db94732f6,41243,-1 for getting cluster id 2024-12-03T08:06:21,768 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T08:06:21,770 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '18777d6e-208f-4ef7-ba20-af51ba08c46a' 2024-12-03T08:06:21,770 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T08:06:21,770 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "18777d6e-208f-4ef7-ba20-af51ba08c46a" 2024-12-03T08:06:21,771 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bb9a9ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T08:06:21,771 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [911db94732f6,41243,-1] 2024-12-03T08:06:21,771 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T08:06:21,771 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:21,773 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58954, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T08:06:21,774 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb57756, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T08:06:21,774 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T08:06:21,775 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=911db94732f6,39409,1733213180749, seqNum=-1] 2024-12-03T08:06:21,776 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T08:06:21,777 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T08:06:21,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=911db94732f6,41243,1733213180649 2024-12-03T08:06:21,780 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T08:06:21,781 DEBUG 
[RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 911db94732f6,41243,1733213180649 2024-12-03T08:06:21,781 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2a094df2 2024-12-03T08:06:21,782 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T08:06:21,783 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58958, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T08:06:21,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T08:06:21,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-03T08:06:21,787 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T08:06:21,788 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:21,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-03T08:06:21,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T08:06:21,789 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T08:06:21,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741837_1013 (size=392) 2024-12-03T08:06:21,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741837_1013 (size=392) 2024-12-03T08:06:21,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741837_1013 (size=392) 2024-12-03T08:06:21,801 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 584250ce97fa16ae89a31211cb2b8d02, NAME => 'TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2 2024-12-03T08:06:21,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741838_1014 (size=51) 2024-12-03T08:06:21,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741838_1014 (size=51) 2024-12-03T08:06:21,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741838_1014 (size=51) 2024-12-03T08:06:21,813 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:21,813 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 584250ce97fa16ae89a31211cb2b8d02, disabling compactions & flushes 2024-12-03T08:06:21,813 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:21,813 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:21,813 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. after waiting 0 ms 2024-12-03T08:06:21,813 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:21,813 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:21,813 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 584250ce97fa16ae89a31211cb2b8d02: Waiting for close lock at 1733213181813Disabling compacts and flushes for region at 1733213181813Disabling writes for close at 1733213181813Writing region close event to WAL at 1733213181813Closed at 1733213181813 2024-12-03T08:06:21,815 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T08:06:21,815 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733213181815"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733213181815"}]},"ts":"1733213181815"} 2024-12-03T08:06:21,818 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
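The RPC handler entries above show the client asking the master to create 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single column family 'cf', after which CreateTableProcedure writes the region to hbase:meta. A minimal client-side sketch of the same request using the TableDescriptorBuilder API follows; connection settings are assumed to come from the default configuration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirrors the create request above: table TestHBaseWalOnEC, one region
      // replica, single column family 'cf'.
      TableDescriptorBuilder builder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
      admin.createTable(builder.build());
    }
  }
}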
2024-12-03T08:06:21,820 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T08:06:21,820 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733213181820"}]},"ts":"1733213181820"} 2024-12-03T08:06:21,822 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-03T08:06:21,823 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {911db94732f6=0} racks are {/default-rack=0} 2024-12-03T08:06:21,824 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T08:06:21,824 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T08:06:21,824 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T08:06:21,824 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T08:06:21,824 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T08:06:21,824 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T08:06:21,824 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T08:06:21,824 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T08:06:21,824 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T08:06:21,824 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T08:06:21,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=584250ce97fa16ae89a31211cb2b8d02, ASSIGN}] 2024-12-03T08:06:21,826 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=584250ce97fa16ae89a31211cb2b8d02, ASSIGN 2024-12-03T08:06:21,827 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=584250ce97fa16ae89a31211cb2b8d02, ASSIGN; state=OFFLINE, location=911db94732f6,39943,1733213180694; forceNewPlan=false, retain=false 2024-12-03T08:06:21,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T08:06:21,978 INFO [911db94732f6:41243 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
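After the assignment subprocedure is initialized here, the test further down in this log waits until all regions of TestHBaseWalOnEC are assigned (HBaseTestingUtil(3046), "Timeout = 60000ms"). A minimal sketch of that wait is shown below; the single-argument overload of waitUntilAllRegionsAssigned is an assumption.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

public class WaitForAssignmentSketch {
  // Blocks until every region of the table is assigned, comparable to the
  // "Waiting until all regions of table TestHBaseWalOnEC get assigned" entry
  // that appears later in this log.
  public static void awaitAssignment(HBaseTestingUtil util) throws Exception {
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));
  }
}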
2024-12-03T08:06:21,978 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=584250ce97fa16ae89a31211cb2b8d02, regionState=OPENING, regionLocation=911db94732f6,39943,1733213180694 2024-12-03T08:06:21,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=584250ce97fa16ae89a31211cb2b8d02, ASSIGN because future has completed 2024-12-03T08:06:21,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 584250ce97fa16ae89a31211cb2b8d02, server=911db94732f6,39943,1733213180694}] 2024-12-03T08:06:22,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T08:06:22,136 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T08:06:22,138 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45675, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T08:06:22,142 INFO [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:22,143 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 584250ce97fa16ae89a31211cb2b8d02, NAME => 'TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02.', STARTKEY => '', ENDKEY => ''} 2024-12-03T08:06:22,143 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,143 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T08:06:22,143 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,143 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,145 INFO [StoreOpener-584250ce97fa16ae89a31211cb2b8d02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,147 INFO [StoreOpener-584250ce97fa16ae89a31211cb2b8d02-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 584250ce97fa16ae89a31211cb2b8d02 columnFamilyName cf 2024-12-03T08:06:22,147 DEBUG [StoreOpener-584250ce97fa16ae89a31211cb2b8d02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T08:06:22,148 INFO [StoreOpener-584250ce97fa16ae89a31211cb2b8d02-1 {}] regionserver.HStore(327): Store=584250ce97fa16ae89a31211cb2b8d02/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T08:06:22,148 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,148 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/default/TestHBaseWalOnEC/584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,149 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/default/TestHBaseWalOnEC/584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,149 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,149 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,151 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,154 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/default/TestHBaseWalOnEC/584250ce97fa16ae89a31211cb2b8d02/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T08:06:22,154 INFO [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 584250ce97fa16ae89a31211cb2b8d02; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62571759, jitterRate=-0.06760813295841217}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T08:06:22,155 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,155 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 584250ce97fa16ae89a31211cb2b8d02: Running coprocessor pre-open hook at 1733213182143Writing region info on filesystem at 1733213182143Initializing all the Stores at 1733213182145 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733213182145Cleaning up temporary data from old regions at 1733213182149 (+4 ms)Running coprocessor post-open hooks at 1733213182155 (+6 ms)Region opened successfully at 1733213182155 2024-12-03T08:06:22,156 INFO [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02., pid=6, masterSystemTime=1733213182136 2024-12-03T08:06:22,159 DEBUG [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:22,159 INFO [RS_OPEN_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:22,161 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=584250ce97fa16ae89a31211cb2b8d02, regionState=OPEN, openSeqNum=2, regionLocation=911db94732f6,39943,1733213180694 2024-12-03T08:06:22,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 584250ce97fa16ae89a31211cb2b8d02, server=911db94732f6,39943,1733213180694 because future has completed 2024-12-03T08:06:22,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T08:06:22,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 584250ce97fa16ae89a31211cb2b8d02, server=911db94732f6,39943,1733213180694 in 184 msec 2024-12-03T08:06:22,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T08:06:22,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=584250ce97fa16ae89a31211cb2b8d02, ASSIGN in 345 msec 2024-12-03T08:06:22,174 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T08:06:22,175 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733213182175"}]},"ts":"1733213182175"} 2024-12-03T08:06:22,177 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-03T08:06:22,179 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): 
pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T08:06:22,181 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 395 msec 2024-12-03T08:06:22,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T08:06:22,418 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-03T08:06:22,418 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-03T08:06:22,419 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T08:06:22,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-03T08:06:22,423 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T08:06:22,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-03T08:06:22,426 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02., hostname=911db94732f6,39943,1733213180694, seqNum=2] 2024-12-03T08:06:22,427 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T08:06:22,429 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T08:06:22,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-03T08:06:22,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-03T08:06:22,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T08:06:22,435 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-03T08:06:22,436 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T08:06:22,436 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T08:06:22,538 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T08:06:22,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39943 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-03T08:06:22,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:22,592 INFO [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 584250ce97fa16ae89a31211cb2b8d02 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-03T08:06:22,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/default/TestHBaseWalOnEC/584250ce97fa16ae89a31211cb2b8d02/.tmp/cf/29055cf2d7a94cd4bf55e59ba1bda0cd is 36, key is row/cf:cq/1733213182430/Put/seqid=0 2024-12-03T08:06:22,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741839_1015 (size=4787) 2024-12-03T08:06:22,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741839_1015 (size=4787) 2024-12-03T08:06:22,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741839_1015 (size=4787) 2024-12-03T08:06:22,618 INFO [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/default/TestHBaseWalOnEC/584250ce97fa16ae89a31211cb2b8d02/.tmp/cf/29055cf2d7a94cd4bf55e59ba1bda0cd 2024-12-03T08:06:22,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/default/TestHBaseWalOnEC/584250ce97fa16ae89a31211cb2b8d02/.tmp/cf/29055cf2d7a94cd4bf55e59ba1bda0cd as hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/default/TestHBaseWalOnEC/584250ce97fa16ae89a31211cb2b8d02/cf/29055cf2d7a94cd4bf55e59ba1bda0cd 2024-12-03T08:06:22,636 INFO [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/default/TestHBaseWalOnEC/584250ce97fa16ae89a31211cb2b8d02/cf/29055cf2d7a94cd4bf55e59ba1bda0cd, entries=1, sequenceid=5, filesize=4.7 K 2024-12-03T08:06:22,637 INFO [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 584250ce97fa16ae89a31211cb2b8d02 in 45ms, sequenceid=5, compaction requested=false 2024-12-03T08:06:22,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 
{event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 584250ce97fa16ae89a31211cb2b8d02: 2024-12-03T08:06:22,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:22,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/911db94732f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-03T08:06:22,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-03T08:06:22,644 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-03T08:06:22,644 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 204 msec 2024-12-03T08:06:22,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 213 msec 2024-12-03T08:06:22,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T08:06:22,748 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-03T08:06:22,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T08:06:22,752 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
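The flush above writes a single 32 B cell keyed row/cf:cq into the cf store file 29055cf2d7a94cd4bf55e59ba1bda0cd and completes FlushTableProcedure pid=7. A minimal client-side sketch that produces the same kind of write-then-flush sequence follows; the cell value is an assumption, since the log only reports the data size.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Writes the row/cf:cq cell seen in the HFileWriterImpl entry above;
      // the value bytes are illustrative.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the FlushTableProcedure / FlushRegionProcedure path (pid=7/8).
      admin.flush(tn);
    }
  }
}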
2024-12-03T08:06:22,752 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:22,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
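The call stack above shows the shutdown being driven from TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) through HBaseTestingUtil.shutdownMiniCluster, invoked by JUnit's RunAfters. A minimal equivalent hook is sketched below; the @After annotation and the static UTIL field are assumptions, as the stack trace only shows that an after-hook performs the shutdown.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class TearDownSketch {
  // Assumed to be the same utility instance that started the mini cluster.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  // Mirrors the tearDown frame in the stack trace above: shuts the mini
  // cluster down via HBaseTestingUtil.shutdownMiniCluster.
  @After
  public void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}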
2024-12-03T08:06:22,753 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:22,753 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T08:06:22,753 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T08:06:22,753 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1012957871, stopped=false 2024-12-03T08:06:22,753 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=911db94732f6,41243,1733213180649 2024-12-03T08:06:22,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:22,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:22,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:22,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T08:06:22,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:22,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:22,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:22,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:22,755 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T08:06:22,755 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T08:06:22,755 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:22,755 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:22,756 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:22,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:22,756 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:22,756 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '911db94732f6,39943,1733213180694' ***** 2024-12-03T08:06:22,756 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T08:06:22,756 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T08:06:22,756 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '911db94732f6,38851,1733213180722' ***** 2024-12-03T08:06:22,756 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T08:06:22,756 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '911db94732f6,39409,1733213180749' ***** 2024-12-03T08:06:22,756 INFO [RS:0;911db94732f6:39943 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T08:06:22,756 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T08:06:22,756 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T08:06:22,756 INFO [RS:2;911db94732f6:39409 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T08:06:22,756 INFO [RS:2;911db94732f6:39409 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T08:06:22,756 INFO [RS:0;911db94732f6:39943 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T08:06:22,757 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T08:06:22,757 INFO [RS:2;911db94732f6:39409 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T08:06:22,757 INFO [RS:0;911db94732f6:39943 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-03T08:06:22,757 INFO [RS:1;911db94732f6:38851 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T08:06:22,757 INFO [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(3091): Received CLOSE for 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,757 INFO [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(959): stopping server 911db94732f6,39409,1733213180749 2024-12-03T08:06:22,757 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T08:06:22,757 INFO [RS:2;911db94732f6:39409 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T08:06:22,757 INFO [RS:1;911db94732f6:38851 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T08:06:22,757 INFO [RS:1;911db94732f6:38851 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T08:06:22,757 INFO [RS:2;911db94732f6:39409 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;911db94732f6:39409. 2024-12-03T08:06:22,757 INFO [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(959): stopping server 911db94732f6,38851,1733213180722 2024-12-03T08:06:22,757 INFO [RS:1;911db94732f6:38851 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T08:06:22,757 INFO [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(959): stopping server 911db94732f6,39943,1733213180694 2024-12-03T08:06:22,757 DEBUG [RS:2;911db94732f6:39409 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:22,757 INFO [RS:1;911db94732f6:38851 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;911db94732f6:38851. 2024-12-03T08:06:22,757 INFO [RS:0;911db94732f6:39943 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T08:06:22,757 DEBUG [RS:2;911db94732f6:39409 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:22,757 INFO [RS:0;911db94732f6:39943 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;911db94732f6:39943. 
2024-12-03T08:06:22,757 DEBUG [RS:1;911db94732f6:38851 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:22,757 DEBUG [RS:1;911db94732f6:38851 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:22,757 INFO [RS:2;911db94732f6:39409 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T08:06:22,757 DEBUG [RS:0;911db94732f6:39943 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T08:06:22,757 INFO [RS:2;911db94732f6:39409 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T08:06:22,757 DEBUG [RS:0;911db94732f6:39943 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:22,757 INFO [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(976): stopping server 911db94732f6,38851,1733213180722; all regions closed. 
2024-12-03T08:06:22,758 INFO [RS:2;911db94732f6:39409 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T08:06:22,758 INFO [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T08:06:22,758 DEBUG [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(1325): Online Regions={584250ce97fa16ae89a31211cb2b8d02=TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02.} 2024-12-03T08:06:22,758 INFO [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T08:06:22,758 DEBUG [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(1351): Waiting on 584250ce97fa16ae89a31211cb2b8d02 2024-12-03T08:06:22,758 INFO [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T08:06:22,758 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 584250ce97fa16ae89a31211cb2b8d02, disabling compactions & flushes 2024-12-03T08:06:22,758 DEBUG [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-03T08:06:22,758 INFO [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:22,758 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T08:06:22,758 DEBUG [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T08:06:22,758 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:22,758 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. after waiting 0 ms 2024-12-03T08:06:22,758 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T08:06:22,758 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,758 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 
2024-12-03T08:06:22,758 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T08:06:22,758 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,758 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T08:06:22,758 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T08:06:22,758 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,759 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-03T08:06:22,759 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741835_1011 (size=93) 2024-12-03T08:06:22,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741835_1011 (size=93) 2024-12-03T08:06:22,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741835_1011 (size=93) 2024-12-03T08:06:22,765 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/default/TestHBaseWalOnEC/584250ce97fa16ae89a31211cb2b8d02/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T08:06:22,766 INFO [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 
2024-12-03T08:06:22,766 DEBUG [RS:1;911db94732f6:38851 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/oldWALs 2024-12-03T08:06:22,766 INFO [RS:1;911db94732f6:38851 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 911db94732f6%2C38851%2C1733213180722:(num 1733213181164) 2024-12-03T08:06:22,766 DEBUG [RS:1;911db94732f6:38851 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:22,766 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 584250ce97fa16ae89a31211cb2b8d02: Waiting for close lock at 1733213182758Running coprocessor pre-close hooks at 1733213182758Disabling compacts and flushes for region at 1733213182758Disabling writes for close at 1733213182758Writing region close event to WAL at 1733213182759 (+1 ms)Running coprocessor post-close hooks at 1733213182766 (+7 ms)Closed at 1733213182766 2024-12-03T08:06:22,766 INFO [RS:1;911db94732f6:38851 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T08:06:22,766 INFO [RS:1;911db94732f6:38851 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T08:06:22,766 DEBUG [RS_CLOSE_REGION-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02. 2024-12-03T08:06:22,767 INFO [RS:1;911db94732f6:38851 {}] hbase.ChoreService(370): Chore service for: regionserver/911db94732f6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T08:06:22,767 INFO [RS:1;911db94732f6:38851 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T08:06:22,767 INFO [regionserver/911db94732f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T08:06:22,767 INFO [RS:1;911db94732f6:38851 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T08:06:22,767 INFO [RS:1;911db94732f6:38851 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T08:06:22,767 INFO [RS:1;911db94732f6:38851 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T08:06:22,767 INFO [RS:1;911db94732f6:38851 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38851 2024-12-03T08:06:22,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/911db94732f6,38851,1733213180722 2024-12-03T08:06:22,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T08:06:22,769 INFO [RS:1;911db94732f6:38851 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T08:06:22,771 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [911db94732f6,38851,1733213180722] 2024-12-03T08:06:22,772 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/911db94732f6,38851,1733213180722 already deleted, retry=false 2024-12-03T08:06:22,772 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 911db94732f6,38851,1733213180722 expired; onlineServers=2 2024-12-03T08:06:22,782 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/.tmp/info/b3bc1f21c4f642f4b505708b19019380 is 153, key is TestHBaseWalOnEC,,1733213181783.584250ce97fa16ae89a31211cb2b8d02./info:regioninfo/1733213182160/Put/seqid=0 2024-12-03T08:06:22,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741840_1016 (size=6637) 2024-12-03T08:06:22,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741840_1016 (size=6637) 2024-12-03T08:06:22,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741840_1016 (size=6637) 2024-12-03T08:06:22,790 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/.tmp/info/b3bc1f21c4f642f4b505708b19019380 2024-12-03T08:06:22,812 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/.tmp/ns/434757d267ea48aeafd9187d4b466bce is 43, key is default/ns:d/1733213181660/Put/seqid=0 2024-12-03T08:06:22,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741841_1017 (size=5153) 2024-12-03T08:06:22,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741841_1017 (size=5153) 2024-12-03T08:06:22,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37929 is added to blk_1073741841_1017 (size=5153) 2024-12-03T08:06:22,820 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/.tmp/ns/434757d267ea48aeafd9187d4b466bce 2024-12-03T08:06:22,822 INFO [regionserver/911db94732f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T08:06:22,825 INFO [regionserver/911db94732f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T08:06:22,829 INFO [regionserver/911db94732f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T08:06:22,842 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/.tmp/table/16f62a0723a3490f94da4eedb76f53ea is 52, key is TestHBaseWalOnEC/table:state/1733213182175/Put/seqid=0 2024-12-03T08:06:22,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741842_1018 (size=5249) 2024-12-03T08:06:22,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741842_1018 (size=5249) 2024-12-03T08:06:22,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741842_1018 (size=5249) 2024-12-03T08:06:22,849 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/.tmp/table/16f62a0723a3490f94da4eedb76f53ea 2024-12-03T08:06:22,857 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/.tmp/info/b3bc1f21c4f642f4b505708b19019380 as hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/info/b3bc1f21c4f642f4b505708b19019380 2024-12-03T08:06:22,866 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/info/b3bc1f21c4f642f4b505708b19019380, entries=10, sequenceid=11, filesize=6.5 K 2024-12-03T08:06:22,867 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/.tmp/ns/434757d267ea48aeafd9187d4b466bce as hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/ns/434757d267ea48aeafd9187d4b466bce 2024-12-03T08:06:22,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=Closed, path=null 2024-12-03T08:06:22,871 INFO [RS:1;911db94732f6:38851 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T08:06:22,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38851-0x101522147800002, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T08:06:22,871 INFO [RS:1;911db94732f6:38851 {}] regionserver.HRegionServer(1031): Exiting; stopping=911db94732f6,38851,1733213180722; zookeeper connection closed. 2024-12-03T08:06:22,871 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5deef0ca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5deef0ca 2024-12-03T08:06:22,873 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/ns/434757d267ea48aeafd9187d4b466bce, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T08:06:22,874 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/.tmp/table/16f62a0723a3490f94da4eedb76f53ea as hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/table/16f62a0723a3490f94da4eedb76f53ea 2024-12-03T08:06:22,881 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/table/16f62a0723a3490f94da4eedb76f53ea, entries=2, sequenceid=11, filesize=5.1 K 2024-12-03T08:06:22,883 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false 2024-12-03T08:06:22,888 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T08:06:22,888 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T08:06:22,888 INFO [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T08:06:22,889 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733213182758Running coprocessor pre-close hooks at 1733213182758Disabling compacts and flushes for region at 1733213182758Disabling writes for close at 1733213182758Obtaining lock to block concurrent updates at 1733213182759 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733213182759Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 
1733213182759Flushing stores of hbase:meta,,1.1588230740 at 1733213182760 (+1 ms)Flushing 1588230740/info: creating writer at 1733213182761 (+1 ms)Flushing 1588230740/info: appending metadata at 1733213182781 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733213182781Flushing 1588230740/ns: creating writer at 1733213182797 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733213182812 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733213182812Flushing 1588230740/table: creating writer at 1733213182827 (+15 ms)Flushing 1588230740/table: appending metadata at 1733213182841 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733213182842 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14a16691: reopening flushed file at 1733213182856 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31a1db8c: reopening flushed file at 1733213182866 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f5b1a31: reopening flushed file at 1733213182873 (+7 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false at 1733213182883 (+10 ms)Writing region close event to WAL at 1733213182884 (+1 ms)Running coprocessor post-close hooks at 1733213182888 (+4 ms)Closed at 1733213182888 2024-12-03T08:06:22,889 DEBUG [RS_CLOSE_META-regionserver/911db94732f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T08:06:22,958 INFO [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(976): stopping server 911db94732f6,39943,1733213180694; all regions closed. 2024-12-03T08:06:22,958 INFO [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(976): stopping server 911db94732f6,39409,1733213180749; all regions closed. 
2024-12-03T08:06:22,958 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,958 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,959 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,959 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,959 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,959 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,959 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,959 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,959 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,959 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741834_1010 (size=1298) 2024-12-03T08:06:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741836_1012 (size=2751) 2024-12-03T08:06:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741834_1010 (size=1298) 2024-12-03T08:06:22,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741834_1010 (size=1298) 2024-12-03T08:06:22,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741836_1012 (size=2751) 2024-12-03T08:06:22,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741836_1012 (size=2751) 2024-12-03T08:06:22,966 DEBUG [RS:2;911db94732f6:39409 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/oldWALs 2024-12-03T08:06:22,966 INFO [RS:2;911db94732f6:39409 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 911db94732f6%2C39409%2C1733213180749.meta:.meta(num 1733213181605) 2024-12-03T08:06:22,966 DEBUG [RS:0;911db94732f6:39943 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/oldWALs 2024-12-03T08:06:22,966 INFO [RS:0;911db94732f6:39943 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 911db94732f6%2C39943%2C1733213180694:(num 1733213181164) 2024-12-03T08:06:22,966 DEBUG [RS:0;911db94732f6:39943 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:22,966 INFO [RS:0;911db94732f6:39943 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T08:06:22,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,966 INFO [RS:0;911db94732f6:39943 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T08:06:22,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,967 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,967 INFO [RS:0;911db94732f6:39943 {}] hbase.ChoreService(370): Chore service for: regionserver/911db94732f6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T08:06:22,967 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,967 INFO [RS:0;911db94732f6:39943 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T08:06:22,967 INFO [RS:0;911db94732f6:39943 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T08:06:22,967 INFO [regionserver/911db94732f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T08:06:22,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:22,967 INFO [RS:0;911db94732f6:39943 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T08:06:22,967 INFO [RS:0;911db94732f6:39943 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T08:06:22,967 INFO [RS:0;911db94732f6:39943 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39943 2024-12-03T08:06:22,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T08:06:22,969 INFO [RS:0;911db94732f6:39943 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T08:06:22,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/911db94732f6,39943,1733213180694 2024-12-03T08:06:22,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741833_1009 (size=93) 2024-12-03T08:06:22,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741833_1009 (size=93) 2024-12-03T08:06:22,971 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [911db94732f6,39943,1733213180694] 2024-12-03T08:06:22,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741833_1009 (size=93) 2024-12-03T08:06:22,972 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/911db94732f6,39943,1733213180694 already deleted, retry=false 2024-12-03T08:06:22,973 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 911db94732f6,39943,1733213180694 expired; onlineServers=1 2024-12-03T08:06:22,973 DEBUG [RS:2;911db94732f6:39409 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/oldWALs 2024-12-03T08:06:22,973 INFO [RS:2;911db94732f6:39409 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 911db94732f6%2C39409%2C1733213180749:(num 1733213181162) 2024-12-03T08:06:22,973 DEBUG [RS:2;911db94732f6:39409 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T08:06:22,973 INFO [RS:2;911db94732f6:39409 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T08:06:22,973 INFO [RS:2;911db94732f6:39409 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T08:06:22,973 INFO [RS:2;911db94732f6:39409 {}] hbase.ChoreService(370): Chore service for: regionserver/911db94732f6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T08:06:22,974 INFO [RS:2;911db94732f6:39409 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T08:06:22,974 INFO [regionserver/911db94732f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T08:06:22,974 INFO [RS:2;911db94732f6:39409 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39409 2024-12-03T08:06:22,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/911db94732f6,39409,1733213180749 2024-12-03T08:06:22,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T08:06:22,976 INFO [RS:2;911db94732f6:39409 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T08:06:22,977 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [911db94732f6,39409,1733213180749] 2024-12-03T08:06:22,978 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/911db94732f6,39409,1733213180749 already deleted, retry=false 2024-12-03T08:06:22,978 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 911db94732f6,39409,1733213180749 expired; onlineServers=0 2024-12-03T08:06:22,978 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '911db94732f6,41243,1733213180649' ***** 2024-12-03T08:06:22,978 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T08:06:22,978 INFO [M:0;911db94732f6:41243 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T08:06:22,978 INFO [M:0;911db94732f6:41243 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T08:06:22,979 DEBUG [M:0;911db94732f6:41243 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T08:06:22,979 DEBUG [M:0;911db94732f6:41243 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T08:06:22,979 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T08:06:22,979 DEBUG [master/911db94732f6:0:becomeActiveMaster-HFileCleaner.large.0-1733213180965 {}] cleaner.HFileCleaner(306): Exit Thread[master/911db94732f6:0:becomeActiveMaster-HFileCleaner.large.0-1733213180965,5,FailOnTimeoutGroup] 2024-12-03T08:06:22,979 DEBUG [master/911db94732f6:0:becomeActiveMaster-HFileCleaner.small.0-1733213180966 {}] cleaner.HFileCleaner(306): Exit Thread[master/911db94732f6:0:becomeActiveMaster-HFileCleaner.small.0-1733213180966,5,FailOnTimeoutGroup] 2024-12-03T08:06:22,979 INFO [M:0;911db94732f6:41243 {}] hbase.ChoreService(370): Chore service for: master/911db94732f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T08:06:22,979 INFO [M:0;911db94732f6:41243 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T08:06:22,979 DEBUG [M:0;911db94732f6:41243 {}] master.HMaster(1795): Stopping service threads 2024-12-03T08:06:22,979 INFO [M:0;911db94732f6:41243 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T08:06:22,979 INFO [M:0;911db94732f6:41243 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T08:06:22,980 INFO [M:0;911db94732f6:41243 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T08:06:22,980 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T08:06:22,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T08:06:22,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T08:06:22,980 DEBUG [M:0;911db94732f6:41243 {}] zookeeper.ZKUtil(347): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T08:06:22,980 WARN [M:0;911db94732f6:41243 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T08:06:22,981 INFO [M:0;911db94732f6:41243 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/.lastflushedseqids 2024-12-03T08:06:22,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741843_1019 (size=127) 2024-12-03T08:06:22,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741843_1019 (size=127) 2024-12-03T08:06:22,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741843_1019 (size=127) 2024-12-03T08:06:22,993 INFO [M:0;911db94732f6:41243 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T08:06:22,993 INFO [M:0;911db94732f6:41243 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T08:06:22,993 DEBUG 
[M:0;911db94732f6:41243 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T08:06:22,993 INFO [M:0;911db94732f6:41243 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:22,993 DEBUG [M:0;911db94732f6:41243 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:22,993 DEBUG [M:0;911db94732f6:41243 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T08:06:22,993 DEBUG [M:0;911db94732f6:41243 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:22,993 INFO [M:0;911db94732f6:41243 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-03T08:06:23,011 DEBUG [M:0;911db94732f6:41243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3cabaa1ae3d14bee9ab9239009184043 is 82, key is hbase:meta,,1/info:regioninfo/1733213181642/Put/seqid=0 2024-12-03T08:06:23,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741844_1020 (size=5672) 2024-12-03T08:06:23,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741844_1020 (size=5672) 2024-12-03T08:06:23,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741844_1020 (size=5672) 2024-12-03T08:06:23,019 INFO [M:0;911db94732f6:41243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3cabaa1ae3d14bee9ab9239009184043 2024-12-03T08:06:23,041 DEBUG [M:0;911db94732f6:41243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/96e2a41bbe06441eb674aae54927edeb is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733213182180/Put/seqid=0 2024-12-03T08:06:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741845_1021 (size=6438) 2024-12-03T08:06:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741845_1021 (size=6438) 2024-12-03T08:06:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741845_1021 (size=6438) 2024-12-03T08:06:23,049 INFO [M:0;911db94732f6:41243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/96e2a41bbe06441eb674aae54927edeb 2024-12-03T08:06:23,071 DEBUG 
[M:0;911db94732f6:41243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3134ddc4af354113af16ad833b208173 is 69, key is 911db94732f6,38851,1733213180722/rs:state/1733213180997/Put/seqid=0 2024-12-03T08:06:23,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T08:06:23,072 INFO [RS:0;911db94732f6:39943 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T08:06:23,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39943-0x101522147800001, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T08:06:23,072 INFO [RS:0;911db94732f6:39943 {}] regionserver.HRegionServer(1031): Exiting; stopping=911db94732f6,39943,1733213180694; zookeeper connection closed. 2024-12-03T08:06:23,072 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@d2edace {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@d2edace 2024-12-03T08:06:23,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T08:06:23,077 INFO [RS:2;911db94732f6:39409 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T08:06:23,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39409-0x101522147800003, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T08:06:23,077 INFO [RS:2;911db94732f6:39409 {}] regionserver.HRegionServer(1031): Exiting; stopping=911db94732f6,39409,1733213180749; zookeeper connection closed. 
2024-12-03T08:06:23,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741846_1022 (size=5294) 2024-12-03T08:06:23,078 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@70e02e9f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@70e02e9f 2024-12-03T08:06:23,078 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-03T08:06:23,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to blk_1073741846_1022 (size=5294) 2024-12-03T08:06:23,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741846_1022 (size=5294) 2024-12-03T08:06:23,078 INFO [M:0;911db94732f6:41243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3134ddc4af354113af16ad833b208173 2024-12-03T08:06:23,085 DEBUG [M:0;911db94732f6:41243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3cabaa1ae3d14bee9ab9239009184043 as hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3cabaa1ae3d14bee9ab9239009184043 2024-12-03T08:06:23,092 INFO [M:0;911db94732f6:41243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3cabaa1ae3d14bee9ab9239009184043, entries=8, sequenceid=72, filesize=5.5 K 2024-12-03T08:06:23,094 DEBUG [M:0;911db94732f6:41243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/96e2a41bbe06441eb674aae54927edeb as hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/96e2a41bbe06441eb674aae54927edeb 2024-12-03T08:06:23,099 INFO [M:0;911db94732f6:41243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/96e2a41bbe06441eb674aae54927edeb, entries=8, sequenceid=72, filesize=6.3 K 2024-12-03T08:06:23,101 DEBUG [M:0;911db94732f6:41243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3134ddc4af354113af16ad833b208173 as hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3134ddc4af354113af16ad833b208173 2024-12-03T08:06:23,107 INFO [M:0;911db94732f6:41243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40423/user/jenkins/test-data/041f0852-66c6-7e64-18c0-5ea779b338b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3134ddc4af354113af16ad833b208173, entries=3, sequenceid=72, filesize=5.2 K 2024-12-03T08:06:23,109 INFO [M:0;911db94732f6:41243 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=72, compaction requested=false 2024-12-03T08:06:23,110 INFO [M:0;911db94732f6:41243 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T08:06:23,110 DEBUG [M:0;911db94732f6:41243 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733213182993Disabling compacts and flushes for region at 1733213182993Disabling writes for close at 1733213182993Obtaining lock to block concurrent updates at 1733213182993Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733213182993Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733213182994 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733213182995 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733213182995Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733213183011 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733213183011Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733213183025 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733213183041 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733213183041Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733213183055 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733213183070 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733213183070Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d2566dc: reopening flushed file at 1733213183084 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a94cf1: reopening flushed file at 1733213183092 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@532b0164: reopening flushed file at 1733213183100 (+8 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=72, compaction requested=false at 1733213183109 (+9 ms)Writing region close event to WAL at 1733213183110 (+1 ms)Closed at 1733213183110 2024-12-03T08:06:23,110 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:23,111 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:23,111 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:23,111 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:23,111 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T08:06:23,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37929 is added to blk_1073741830_1006 (size=32665) 2024-12-03T08:06:23,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40797 is added to 
blk_1073741830_1006 (size=32665) 2024-12-03T08:06:23,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41749 is added to blk_1073741830_1006 (size=32665) 2024-12-03T08:06:23,115 INFO [M:0;911db94732f6:41243 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T08:06:23,115 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T08:06:23,115 INFO [M:0;911db94732f6:41243 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41243 2024-12-03T08:06:23,115 INFO [M:0;911db94732f6:41243 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T08:06:23,218 INFO [M:0;911db94732f6:41243 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T08:06:23,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T08:06:23,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41243-0x101522147800000, quorum=127.0.0.1:49329, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T08:06:23,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@190e176c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T08:06:23,220 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14e9278c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T08:06:23,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T08:06:23,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38e5384{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T08:06:23,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40b03519{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.log.dir/,STOPPED} 2024-12-03T08:06:23,222 WARN [BP-1691174847-172.17.0.2-1733213179807 heartbeating to localhost/127.0.0.1:40423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T08:06:23,222 WARN [BP-1691174847-172.17.0.2-1733213179807 heartbeating to localhost/127.0.0.1:40423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1691174847-172.17.0.2-1733213179807 (Datanode Uuid 2209e16f-937f-4fe9-93e3-d44bcf16f7a2) service to localhost/127.0.0.1:40423 2024-12-03T08:06:23,222 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T08:06:23,222 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T08:06:23,223 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data5/current/BP-1691174847-172.17.0.2-1733213179807 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T08:06:23,223 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data6/current/BP-1691174847-172.17.0.2-1733213179807 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T08:06:23,223 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T08:06:23,227 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a04b23b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T08:06:23,227 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ab72a47{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T08:06:23,228 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T08:06:23,228 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e4c23ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T08:06:23,228 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55cf3a01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.log.dir/,STOPPED}
2024-12-03T08:06:23,229 WARN  [BP-1691174847-172.17.0.2-1733213179807 heartbeating to localhost/127.0.0.1:40423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T08:06:23,229 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T08:06:23,229 WARN  [BP-1691174847-172.17.0.2-1733213179807 heartbeating to localhost/127.0.0.1:40423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1691174847-172.17.0.2-1733213179807 (Datanode Uuid cb3475ab-0227-4db2-a2f8-f4e454f5ea3d) service to localhost/127.0.0.1:40423
2024-12-03T08:06:23,229 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T08:06:23,230 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data3/current/BP-1691174847-172.17.0.2-1733213179807 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T08:06:23,230 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data4/current/BP-1691174847-172.17.0.2-1733213179807 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T08:06:23,230 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T08:06:23,233 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16eaa68d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T08:06:23,233 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9885f6c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T08:06:23,233 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T08:06:23,233 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45b09adf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T08:06:23,234 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72f96008{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.log.dir/,STOPPED}
2024-12-03T08:06:23,235 WARN  [BP-1691174847-172.17.0.2-1733213179807 heartbeating to localhost/127.0.0.1:40423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T08:06:23,235 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T08:06:23,235 WARN  [BP-1691174847-172.17.0.2-1733213179807 heartbeating to localhost/127.0.0.1:40423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1691174847-172.17.0.2-1733213179807 (Datanode Uuid ec9f2476-cbd7-4809-ad1c-193149059eb9) service to localhost/127.0.0.1:40423
2024-12-03T08:06:23,235 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T08:06:23,235 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data1/current/BP-1691174847-172.17.0.2-1733213179807 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T08:06:23,236 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/cluster_4ddc1c89-a983-1f9d-34b8-314041aefec1/data/data2/current/BP-1691174847-172.17.0.2-1733213179807 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T08:06:23,236 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T08:06:23,241 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41ad60e4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T08:06:23,242 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4cf39bc8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T08:06:23,242 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T08:06:23,242 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d952814{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T08:06:23,242 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e58533{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5a41578b-e884-5793-a885-12779435a8f0/hadoop.log.dir/,STOPPED}
2024-12-03T08:06:23,250 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-03T08:06:23,274 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-03T08:06:23,277 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T08:06:23,283 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=149 (was 90) - Thread LEAK? -, OpenFileDescriptor=519 (was 437) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=95 (was 69) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8439 (was 8571)
2024-12-03T08:06:23,283 WARN  [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null