2024-12-03 14:45:35,949 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-03 14:45:35,960 main DEBUG Took 0.008934 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-03 14:45:35,960 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-03 14:45:35,961 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-03 14:45:35,962 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-03 14:45:35,963 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:35,974 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-03 14:45:35,994 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:35,996 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:35,997 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:35,997 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:35,998 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:35,998 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:35,999 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,000 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:36,000 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,001 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:36,002 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,002 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:36,003 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,003 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-03 14:45:36,004 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,004 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:36,005 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,005 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:36,006 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,006 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:36,007 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,007 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:36,008 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,008 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 14:45:36,009 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,009 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-03 14:45:36,011 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 14:45:36,012 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-03 14:45:36,014 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-03 14:45:36,015 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-03 14:45:36,016 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-03 14:45:36,017 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-03 14:45:36,026 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-03 14:45:36,029 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-03 14:45:36,031 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-03 14:45:36,032 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-03 14:45:36,032 main DEBUG createAppenders(={Console}) 2024-12-03 14:45:36,033 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-03 14:45:36,034 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-03 14:45:36,034 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-03 14:45:36,035 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-03 14:45:36,035 main DEBUG OutputStream closed 2024-12-03 14:45:36,035 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-03 14:45:36,036 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-03 14:45:36,036 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-03 14:45:36,127 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-03 14:45:36,129 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-03 14:45:36,131 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-03 14:45:36,132 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-03 14:45:36,133 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-03 14:45:36,133 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-03 14:45:36,134 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-03 14:45:36,134 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-03 14:45:36,134 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-03 14:45:36,135 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-03 14:45:36,135 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-03 14:45:36,136 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-03 14:45:36,136 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-03 14:45:36,136 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-03 14:45:36,137 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-03 14:45:36,137 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-03 14:45:36,138 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-03 14:45:36,138 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-03 14:45:36,140 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-03 14:45:36,141 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-03 14:45:36,141 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-03 14:45:36,142 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-03T14:45:36,163 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-03 14:45:36,167 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-03 14:45:36,168 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-03T14:45:36,416 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8 2024-12-03T14:45:36,450 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933, deleteOnExit=true 2024-12-03T14:45:36,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/test.cache.data in system properties and HBase conf 2024-12-03T14:45:36,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T14:45:36,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir in system properties and HBase conf 2024-12-03T14:45:36,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T14:45:36,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T14:45:36,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T14:45:36,551 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-03T14:45:36,666 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T14:45:36,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T14:45:36,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T14:45:36,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T14:45:36,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T14:45:36,676 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T14:45:36,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T14:45:36,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T14:45:36,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T14:45:36,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T14:45:36,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/nfs.dump.dir in system properties and HBase conf 2024-12-03T14:45:36,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/java.io.tmpdir in system properties and HBase conf 2024-12-03T14:45:36,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T14:45:36,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T14:45:36,684 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T14:45:37,540 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-03T14:45:37,619 INFO [Time-limited test {}] log.Log(170): Logging initialized @2347ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-03T14:45:37,683 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:37,739 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T14:45:37,755 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T14:45:37,755 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T14:45:37,757 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T14:45:37,767 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:37,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir/,AVAILABLE} 2024-12-03T14:45:37,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T14:45:37,940 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/java.io.tmpdir/jetty-localhost-38207-hadoop-hdfs-3_4_1-tests_jar-_-any-13411036162522093839/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T14:45:37,952 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:38207} 2024-12-03T14:45:37,952 INFO [Time-limited test {}] server.Server(415): Started @2681ms 2024-12-03T14:45:38,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:38,273 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T14:45:38,274 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T14:45:38,274 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T14:45:38,275 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T14:45:38,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir/,AVAILABLE} 2024-12-03T14:45:38,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T14:45:38,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/java.io.tmpdir/jetty-localhost-39201-hadoop-hdfs-3_4_1-tests_jar-_-any-2963395927136021988/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:38,370 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:39201} 2024-12-03T14:45:38,370 INFO [Time-limited test {}] server.Server(415): Started @3098ms 2024-12-03T14:45:38,415 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T14:45:38,515 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:38,522 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T14:45:38,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T14:45:38,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T14:45:38,526 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T14:45:38,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir/,AVAILABLE} 2024-12-03T14:45:38,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T14:45:38,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/java.io.tmpdir/jetty-localhost-46343-hadoop-hdfs-3_4_1-tests_jar-_-any-13588103893159721664/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:38,631 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:46343} 2024-12-03T14:45:38,631 INFO [Time-limited test {}] server.Server(415): Started @3360ms 2024-12-03T14:45:38,633 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T14:45:38,687 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:38,692 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T14:45:38,693 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T14:45:38,693 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T14:45:38,694 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T14:45:38,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir/,AVAILABLE} 2024-12-03T14:45:38,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T14:45:38,789 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data2/current/BP-1130478452-172.17.0.2-1733237137316/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:38,789 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data3/current/BP-1130478452-172.17.0.2-1733237137316/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:38,789 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data1/current/BP-1130478452-172.17.0.2-1733237137316/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:38,789 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data4/current/BP-1130478452-172.17.0.2-1733237137316/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:38,813 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/java.io.tmpdir/jetty-localhost-36711-hadoop-hdfs-3_4_1-tests_jar-_-any-1841265188038600843/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:38,813 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:36711} 
2024-12-03T14:45:38,813 INFO [Time-limited test {}] server.Server(415): Started @3542ms 2024-12-03T14:45:38,816 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T14:45:38,824 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T14:45:38,825 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T14:45:38,888 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc88e039e611221b2 with lease ID 0xd624bda643afdb4d: Processing first storage report for DS-b9b5b26b-9ccb-4a26-8fbf-b6669cecbb62 from datanode DatanodeRegistration(127.0.0.1:39911, datanodeUuid=9dffe93c-c4b5-43e6-9399-62f92947c400, infoPort=43401, infoSecurePort=0, ipcPort=34405, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316) 2024-12-03T14:45:38,889 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc88e039e611221b2 with lease ID 0xd624bda643afdb4d: from storage DS-b9b5b26b-9ccb-4a26-8fbf-b6669cecbb62 node DatanodeRegistration(127.0.0.1:39911, datanodeUuid=9dffe93c-c4b5-43e6-9399-62f92947c400, infoPort=43401, infoSecurePort=0, ipcPort=34405, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T14:45:38,890 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd77e906494f3724 with lease ID 0xd624bda643afdb4c: Processing first storage report for DS-f35aca29-109e-4999-b0d1-5548398dfdee from datanode DatanodeRegistration(127.0.0.1:44665, datanodeUuid=75c034ed-3f31-43fa-b73b-adccae630ca5, infoPort=43505, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316) 2024-12-03T14:45:38,890 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd77e906494f3724 with lease ID 0xd624bda643afdb4c: from storage DS-f35aca29-109e-4999-b0d1-5548398dfdee node DatanodeRegistration(127.0.0.1:44665, datanodeUuid=75c034ed-3f31-43fa-b73b-adccae630ca5, infoPort=43505, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T14:45:38,890 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc88e039e611221b2 with lease ID 0xd624bda643afdb4d: Processing first storage report for DS-e94805f1-62c2-4b06-b4e9-1f8d1d6b5864 from datanode DatanodeRegistration(127.0.0.1:39911, datanodeUuid=9dffe93c-c4b5-43e6-9399-62f92947c400, infoPort=43401, infoSecurePort=0, ipcPort=34405, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316) 2024-12-03T14:45:38,890 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc88e039e611221b2 with lease ID 0xd624bda643afdb4d: from storage DS-e94805f1-62c2-4b06-b4e9-1f8d1d6b5864 node DatanodeRegistration(127.0.0.1:39911, datanodeUuid=9dffe93c-c4b5-43e6-9399-62f92947c400, infoPort=43401, infoSecurePort=0, ipcPort=34405, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, 
invalidatedBlocks: 0 2024-12-03T14:45:38,891 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd77e906494f3724 with lease ID 0xd624bda643afdb4c: Processing first storage report for DS-07755eb1-82f3-403b-87c6-81c7a2aa6939 from datanode DatanodeRegistration(127.0.0.1:44665, datanodeUuid=75c034ed-3f31-43fa-b73b-adccae630ca5, infoPort=43505, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316) 2024-12-03T14:45:38,891 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd77e906494f3724 with lease ID 0xd624bda643afdb4c: from storage DS-07755eb1-82f3-403b-87c6-81c7a2aa6939 node DatanodeRegistration(127.0.0.1:44665, datanodeUuid=75c034ed-3f31-43fa-b73b-adccae630ca5, infoPort=43505, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T14:45:38,904 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data5/current/BP-1130478452-172.17.0.2-1733237137316/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:38,905 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data6/current/BP-1130478452-172.17.0.2-1733237137316/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:38,931 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T14:45:38,937 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52e61d1de615f704 with lease ID 0xd624bda643afdb4e: Processing first storage report for DS-1a800fb1-2cb2-41f4-a2d9-916fd8977e11 from datanode DatanodeRegistration(127.0.0.1:44445, datanodeUuid=dd709686-6852-47e2-8aa3-3e6c5f534ca8, infoPort=40167, infoSecurePort=0, ipcPort=38743, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316) 2024-12-03T14:45:38,937 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52e61d1de615f704 with lease ID 0xd624bda643afdb4e: from storage DS-1a800fb1-2cb2-41f4-a2d9-916fd8977e11 node DatanodeRegistration(127.0.0.1:44445, datanodeUuid=dd709686-6852-47e2-8aa3-3e6c5f534ca8, infoPort=40167, infoSecurePort=0, ipcPort=38743, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T14:45:38,937 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52e61d1de615f704 with lease ID 0xd624bda643afdb4e: Processing first storage report for DS-512e6b84-364a-4bd5-bcf4-ce5d6e182b08 from datanode DatanodeRegistration(127.0.0.1:44445, datanodeUuid=dd709686-6852-47e2-8aa3-3e6c5f534ca8, infoPort=40167, infoSecurePort=0, ipcPort=38743, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316) 2024-12-03T14:45:38,938 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52e61d1de615f704 with lease ID 0xd624bda643afdb4e: from storage DS-512e6b84-364a-4bd5-bcf4-ce5d6e182b08 node DatanodeRegistration(127.0.0.1:44445, datanodeUuid=dd709686-6852-47e2-8aa3-3e6c5f534ca8, infoPort=40167, infoSecurePort=0, ipcPort=38743, storageInfo=lv=-57;cid=testClusterID;nsid=1544998755;c=1733237137316), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T14:45:39,121 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8 2024-12-03T14:45:39,181 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-03T14:45:39,226 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=156, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=310, ProcessCount=11, AvailableMemoryMB=5608 2024-12-03T14:45:39,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T14:45:39,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-03T14:45:39,301 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/zookeeper_0, clientPort=55238, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T14:45:39,311 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55238 2024-12-03T14:45:39,320 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:39,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:39,414 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:39,415 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:39,459 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:45346 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:44665:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45346 dst: /127.0.0.1:44665 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:39,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-03T14:45:39,878 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:39,891 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920 with version=8 2024-12-03T14:45:39,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/hbase-staging 2024-12-03T14:45:39,963 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-03T14:45:40,150 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a5d22df9eca2:0 server-side Connection retries=45 2024-12-03T14:45:40,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,163 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T14:45:40,163 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,163 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T14:45:40,277 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T14:45:40,327 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-03T14:45:40,335 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-03T14:45:40,338 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T14:45:40,357 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 78514 (auto-detected) 2024-12-03T14:45:40,358 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-03T14:45:40,372 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43511 2024-12-03T14:45:40,389 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43511 connecting to ZooKeeper ensemble=127.0.0.1:55238 2024-12-03T14:45:40,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:435110x0, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T14:45:40,417 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43511-0x100a08b43c40000 connected 2024-12-03T14:45:40,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:40,448 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:40,460 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:40,464 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920, hbase.cluster.distributed=false 2024-12-03T14:45:40,485 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T14:45:40,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43511 2024-12-03T14:45:40,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43511 2024-12-03T14:45:40,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43511 2024-12-03T14:45:40,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43511 2024-12-03T14:45:40,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43511 2024-12-03T14:45:40,575 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a5d22df9eca2:0 server-side Connection retries=45 2024-12-03T14:45:40,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,576 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,577 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T14:45:40,577 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,577 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T14:45:40,579 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T14:45:40,581 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T14:45:40,582 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36997 2024-12-03T14:45:40,583 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36997 connecting to ZooKeeper ensemble=127.0.0.1:55238 2024-12-03T14:45:40,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:40,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:40,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369970x0, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T14:45:40,593 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:369970x0, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:40,593 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36997-0x100a08b43c40001 connected 2024-12-03T14:45:40,597 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T14:45:40,603 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T14:45:40,605 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T14:45:40,610 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T14:45:40,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36997 2024-12-03T14:45:40,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36997 
2024-12-03T14:45:40,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36997 2024-12-03T14:45:40,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36997 2024-12-03T14:45:40,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36997 2024-12-03T14:45:40,626 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a5d22df9eca2:0 server-side Connection retries=45 2024-12-03T14:45:40,627 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,627 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,627 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T14:45:40,627 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,628 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T14:45:40,628 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T14:45:40,628 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T14:45:40,629 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43899 2024-12-03T14:45:40,631 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43899 connecting to ZooKeeper ensemble=127.0.0.1:55238 2024-12-03T14:45:40,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:40,636 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:40,643 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:438990x0, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T14:45:40,643 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43899-0x100a08b43c40002 connected 2024-12-03T14:45:40,643 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:40,644 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-12-03T14:45:40,650 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T14:45:40,652 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T14:45:40,654 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T14:45:40,655 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43899 2024-12-03T14:45:40,655 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43899 2024-12-03T14:45:40,655 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43899 2024-12-03T14:45:40,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43899 2024-12-03T14:45:40,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43899 2024-12-03T14:45:40,673 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a5d22df9eca2:0 server-side Connection retries=45 2024-12-03T14:45:40,673 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,673 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,674 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T14:45:40,674 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:40,674 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T14:45:40,674 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T14:45:40,674 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T14:45:40,675 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37335 2024-12-03T14:45:40,677 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37335 connecting to ZooKeeper ensemble=127.0.0.1:55238 2024-12-03T14:45:40,679 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:40,681 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:40,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:373350x0, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T14:45:40,686 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:373350x0, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:40,686 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37335-0x100a08b43c40003 connected 2024-12-03T14:45:40,687 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T14:45:40,689 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T14:45:40,691 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T14:45:40,693 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T14:45:40,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37335 2024-12-03T14:45:40,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37335 2024-12-03T14:45:40,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37335 2024-12-03T14:45:40,701 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37335 2024-12-03T14:45:40,701 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37335 2024-12-03T14:45:40,715 DEBUG [M:0;a5d22df9eca2:43511 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a5d22df9eca2:43511 2024-12-03T14:45:40,716 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a5d22df9eca2,43511,1733237140004 2024-12-03T14:45:40,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:40,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:40,721 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
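
[Annotation] The RpcExecutor and RWQueueRpcExecutor entries above (handlerCount=3, maxQueueLength=30, and the writeHandlers=1 / readHandlers=2 split for priority.RWQ.Fifo) reflect the region server's call-queue settings. The sketch below sets the configuration keys that commonly drive those values, programmatically for illustration only; in a real deployment they would live in hbase-site.xml, and treating exactly these keys as the source of the logged numbers is an assumption, not something read out of this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CallQueueConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Number of RPC handler threads per executor group (handlerCount in the log).
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Per-queue backlog limit (maxQueueLength=30 in the log).
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
        // Fraction of call queues/handlers reserved for reads; a non-zero value is what
        // produces an RWQueueRpcExecutor read/write split like the one logged above.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        System.out.println("handler.count=" + conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }
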
2024-12-03T14:45:40,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:40,723 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a5d22df9eca2,43511,1733237140004 2024-12-03T14:45:40,741 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T14:45:40,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T14:45:40,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T14:45:40,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:40,741 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:40,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:40,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:40,742 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T14:45:40,743 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a5d22df9eca2,43511,1733237140004 from backup master directory 2024-12-03T14:45:40,745 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:40,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:40,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a5d22df9eca2,43511,1733237140004 
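
[Annotation] The ZKWatcher entries above are the master and the three region servers reacting to znode changes under /hbase (children of /hbase/backup-masters changing, /hbase/master being created) while the active master registers itself. A minimal sketch of the same watch-and-notify pattern with the plain ZooKeeper client follows; it assumes the 127.0.0.1:55238 ensemble and the znode paths from the log and is not HBase's ZKWatcher itself.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
          // Mirrors the "Received ZooKeeper Event, type=..., state=..., path=..." lines above.
          System.out.println("type=" + event.getType() + ", state=" + event.getState()
              + ", path=" + event.getPath());
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55238", 30_000, watcher);
        connected.await();
        // One-shot watches: NodeCreated/NodeDeleted on /hbase/master,
        // NodeChildrenChanged on /hbase/backup-masters (assumes the znodes exist).
        zk.exists("/hbase/master", true);
        List<String> backups = zk.getChildren("/hbase/backup-masters", true);
        System.out.println("backup masters: " + backups);
        Thread.sleep(10_000); // give events time to arrive, then clean up
        zk.close();
      }
    }
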
2024-12-03T14:45:40,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:40,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:40,746 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T14:45:40,746 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a5d22df9eca2,43511,1733237140004 2024-12-03T14:45:40,748 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-03T14:45:40,749 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-03T14:45:40,807 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/hbase.id] with ID: 6d1ec690-d930-4976-b6c6-286627144007 2024-12-03T14:45:40,807 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/.tmp/hbase.id 2024-12-03T14:45:40,813 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:40,814 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:40,818 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:37184 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:39911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37184 dst: /127.0.0.1:39911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:40,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-03T14:45:40,824 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:40,825 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/.tmp/hbase.id]:[hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/hbase.id] 2024-12-03T14:45:40,868 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:40,872 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T14:45:40,888 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-03T14:45:40,891 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:40,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:40,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:40,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:40,902 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:40,902 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:40,904 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:45376 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:44665:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45376 dst: /127.0.0.1:44665 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:40,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-03T14:45:40,910 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
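
[Annotation] The DFSStripedOutputStream warnings and DataXceiver "Premature EOF" errors above come from writing under a directory whose erasure coding policy (RS-3-2-1024k, i.e. 3 data + 2 parity blocks) needs more datanodes than the three-node mini DFS provides; the log itself points at `hdfs ec -verifyClusterSetup`. The sketch below performs the equivalent check from Java against the hdfs://localhost:37511 namenode and test path seen in the log; it is a diagnostic illustration, not something the test does.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path dir = new Path("/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920");
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37511"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          DatanodeInfo[] nodes = dfs.getDataNodeStats();
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println(dir + " uses plain replication");
          } else {
            // RS-3-2 needs 5 datanodes for a full block group; the mini cluster has 3.
            int required = policy.getNumDataUnits() + policy.getNumParityUnits();
            System.out.println(policy.getName() + " needs " + required
                + " datanodes, cluster has " + nodes.length);
          }
        }
      }
    }
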
2024-12-03T14:45:40,926 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T14:45:40,927 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T14:45:40,932 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T14:45:40,955 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:40,956 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:40,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:34788 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:44445:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34788 dst: /127.0.0.1:44445 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:40,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-03T14:45:40,965 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:40,981 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store 2024-12-03T14:45:40,997 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:40,997 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:41,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:37216 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37216 dst: /127.0.0.1:39911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:41,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-03T14:45:41,005 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:41,009 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-03T14:45:41,011 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:41,012 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T14:45:41,013 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:41,013 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:41,014 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-03T14:45:41,014 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:41,014 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:41,015 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733237141012Disabling compacts and flushes for region at 1733237141012Disabling writes for close at 1733237141014 (+2 ms)Writing region close event to WAL at 1733237141014Closed at 1733237141014 2024-12-03T14:45:41,017 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/.initializing 2024-12-03T14:45:41,018 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/WALs/a5d22df9eca2,43511,1733237140004 2024-12-03T14:45:41,025 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T14:45:41,039 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C43511%2C1733237140004, suffix=, logDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/WALs/a5d22df9eca2,43511,1733237140004, archiveDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/oldWALs, maxLogs=10 2024-12-03T14:45:41,063 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/WALs/a5d22df9eca2,43511,1733237140004/a5d22df9eca2%2C43511%2C1733237140004.1733237141043, exclude list is [], retry=0 2024-12-03T14:45:41,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:41,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44445,DS-1a800fb1-2cb2-41f4-a2d9-916fd8977e11,DISK] 2024-12-03T14:45:41,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39911,DS-b9b5b26b-9ccb-4a26-8fbf-b6669cecbb62,DISK] 2024-12-03T14:45:41,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44665,DS-f35aca29-109e-4999-b0d1-5548398dfdee,DISK] 2024-12-03T14:45:41,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-03T14:45:41,116 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/WALs/a5d22df9eca2,43511,1733237140004/a5d22df9eca2%2C43511%2C1733237140004.1733237141043 2024-12-03T14:45:41,116 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43401:43401),(127.0.0.1/127.0.0.1:43505:43505),(127.0.0.1/127.0.0.1:40167:40167)] 2024-12-03T14:45:41,117 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T14:45:41,117 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:41,120 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,121 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T14:45:41,176 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:41,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:41,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,181 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T14:45:41,181 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:41,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T14:45:41,183 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T14:45:41,185 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:41,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T14:45:41,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T14:45:41,189 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:41,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T14:45:41,190 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,193 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,195 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,199 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,200 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,203 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T14:45:41,207 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:41,213 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T14:45:41,214 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70161388, jitterRate=0.04548615217208862}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T14:45:41,219 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733237141132Initializing all the Stores at 1733237141134 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237141134Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237141135 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237141135Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237141135Cleaning up temporary data from old regions at 1733237141200 (+65 ms)Region opened successfully at 1733237141219 (+19 ms) 2024-12-03T14:45:41,220 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T14:45:41,247 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@105b1cab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a5d22df9eca2/172.17.0.2:0 2024-12-03T14:45:41,273 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T14:45:41,282 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T14:45:41,282 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T14:45:41,284 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T14:45:41,285 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-03T14:45:41,289 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-03T14:45:41,289 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T14:45:41,310 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T14:45:41,317 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T14:45:41,319 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T14:45:41,321 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T14:45:41,322 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T14:45:41,323 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T14:45:41,325 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T14:45:41,328 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T14:45:41,329 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T14:45:41,331 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T14:45:41,332 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T14:45:41,345 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T14:45:41,347 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T14:45:41,349 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:41,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:41,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:41,350 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:41,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,353 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a5d22df9eca2,43511,1733237140004, sessionid=0x100a08b43c40000, setting cluster-up flag (Was=false) 2024-12-03T14:45:41,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,362 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
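
[Annotation] The ZKUtil/RecoverableZooKeeper entries above show the new active master probing /hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge and /hbase/snapshot-cleanup, the znodes that back the cluster-wide feature switches. A short sketch of flipping the same switches from a client via the Admin API follows; the method names are those of recent HBase client versions and the quorum is again taken from the log, so treat it as an illustration rather than part of this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterSwitchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "55238");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Each call returns the previous state; the master tracks these switches
          // using the znodes probed in the log lines above.
          boolean prevBalancer = admin.balancerSwitch(true, true); // enable balancer, synchronously
          boolean prevNormalizer = admin.normalizerSwitch(true);   // enable region normalizer
          boolean prevSplit = admin.splitSwitch(true, true);       // allow region splits
          boolean prevMerge = admin.mergeSwitch(true, true);       // allow region merges
          System.out.printf("previous states: balancer=%s normalizer=%s split=%s merge=%s%n",
              prevBalancer, prevNormalizer, prevSplit, prevMerge);
        }
      }
    }
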
2024-12-03T14:45:41,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,366 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T14:45:41,367 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a5d22df9eca2,43511,1733237140004 2024-12-03T14:45:41,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,371 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:41,375 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T14:45:41,376 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a5d22df9eca2,43511,1733237140004 2024-12-03T14:45:41,381 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T14:45:41,405 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(746): ClusterId : 6d1ec690-d930-4976-b6c6-286627144007 2024-12-03T14:45:41,405 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(746): ClusterId : 6d1ec690-d930-4976-b6c6-286627144007 2024-12-03T14:45:41,405 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(746): ClusterId : 6d1ec690-d930-4976-b6c6-286627144007 2024-12-03T14:45:41,407 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T14:45:41,407 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T14:45:41,407 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T14:45:41,411 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T14:45:41,411 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-12-03T14:45:41,411 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T14:45:41,411 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T14:45:41,412 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T14:45:41,412 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T14:45:41,414 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T14:45:41,414 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T14:45:41,415 DEBUG [RS:2;a5d22df9eca2:37335 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45345036, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a5d22df9eca2/172.17.0.2:0 2024-12-03T14:45:41,415 DEBUG [RS:0;a5d22df9eca2:36997 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@539bf533, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a5d22df9eca2/172.17.0.2:0 2024-12-03T14:45:41,415 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T14:45:41,416 DEBUG [RS:1;a5d22df9eca2:43899 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bdb1819, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a5d22df9eca2/172.17.0.2:0 2024-12-03T14:45:41,433 DEBUG [RS:2;a5d22df9eca2:37335 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;a5d22df9eca2:37335 2024-12-03T14:45:41,433 DEBUG [RS:0;a5d22df9eca2:36997 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a5d22df9eca2:36997 2024-12-03T14:45:41,436 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a5d22df9eca2:43899 2024-12-03T14:45:41,437 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T14:45:41,437 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T14:45:41,437 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T14:45:41,438 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T14:45:41,438 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T14:45:41,438 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T14:45:41,438 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(832): About to register with Master. 
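
[Editor's note] The "Installed shutdown hook thread" lines above correspond to each region server registering a JVM shutdown hook so it can release resources on exit. The sketch below only shows the underlying JDK mechanism; the resource being closed is a hypothetical stand-in, and HBase's own ShutdownHook does considerably more (for example, coordinating with the filesystem shutdown hook).

    public class ShutdownHookSketch {
        public static void main(String[] args) throws InterruptedException {
            // Hypothetical stand-in for a region server's open WALs, RPC server, etc.
            Runnable closeResources = () -> System.out.println("flushing and closing resources");

            Thread hook = new Thread(closeResources, "Shutdownhook:RS:0");
            // The JVM runs registered hooks on normal exit or on SIGTERM/SIGINT.
            Runtime.getRuntime().addShutdownHook(hook);

            System.out.println("serving...");
            Thread.sleep(1_000);   // pretend to do work, then exit normally -> hook runs
        }
    }
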
2024-12-03T14:45:41,438 DEBUG [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T14:45:41,438 DEBUG [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T14:45:41,440 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(2659): reportForDuty to master=a5d22df9eca2,43511,1733237140004 with port=43899, startcode=1733237140626 2024-12-03T14:45:41,440 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(2659): reportForDuty to master=a5d22df9eca2,43511,1733237140004 with port=36997, startcode=1733237140546 2024-12-03T14:45:41,440 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(2659): reportForDuty to master=a5d22df9eca2,43511,1733237140004 with port=37335, startcode=1733237140672 2024-12-03T14:45:41,452 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T14:45:41,453 DEBUG [RS:1;a5d22df9eca2:43899 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T14:45:41,453 DEBUG [RS:0;a5d22df9eca2:36997 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T14:45:41,453 DEBUG [RS:2;a5d22df9eca2:37335 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T14:45:41,464 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T14:45:41,469 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
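
[Editor's note] The StochasticLoadBalancer line above lists the cost functions and reports the sum of their multipliers. The following is only a sketch of the weighted-sum idea behind such a balancer, with made-up cost values and multipliers that loosely echo the names in the log; it is not HBase's actual implementation or its real numbers.

    public class WeightedCostSketch {
        public static void main(String[] args) {
            // Hypothetical scaled costs in [0, 1] and their multipliers.
            String[] names = {"regionCountSkew", "moveCost", "serverLocality"};
            double[] cost = {0.30, 0.10, 0.55};
            double[] multiplier = {500.0, 7.0, 25.0};

            double total = 0, multiplierSum = 0;
            for (int i = 0; i < names.length; i++) {
                total += multiplier[i] * cost[i];
                multiplierSum += multiplier[i];
            }
            // A stochastic balancer proposes random moves/swaps and keeps a candidate
            // layout only if this weighted total goes down.
            System.out.printf("weighted cost = %.1f (sum of multipliers = %.1f)%n",
                total, multiplierSum);
        }
    }
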
2024-12-03T14:45:41,475 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a5d22df9eca2,43511,1733237140004 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T14:45:41,482 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a5d22df9eca2:0, corePoolSize=5, maxPoolSize=5 2024-12-03T14:45:41,483 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a5d22df9eca2:0, corePoolSize=5, maxPoolSize=5 2024-12-03T14:45:41,483 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=5, maxPoolSize=5 2024-12-03T14:45:41,483 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=5, maxPoolSize=5 2024-12-03T14:45:41,483 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a5d22df9eca2:0, corePoolSize=10, maxPoolSize=10 2024-12-03T14:45:41,483 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,483 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=2, maxPoolSize=2 2024-12-03T14:45:41,484 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,489 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T14:45:41,490 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T14:45:41,490 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733237171490 2024-12-03T14:45:41,491 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46603, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T14:45:41,491 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38791, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T14:45:41,491 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60195, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T14:45:41,492 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner 
Cleaner pool size is 1 2024-12-03T14:45:41,493 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T14:45:41,496 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T14:45:41,497 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T14:45:41,497 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T14:45:41,497 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T14:45:41,498 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43511 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T14:45:41,498 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:41,499 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING 
=> 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T14:45:41,498 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,502 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T14:45:41,503 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T14:45:41,503 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T14:45:41,504 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43511 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T14:45:41,505 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43511 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T14:45:41,506 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T14:45:41,506 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T14:45:41,507 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:41,507 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:41,508 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.large.0-1733237141507,5,FailOnTimeoutGroup] 2024-12-03T14:45:41,513 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.small.0-1733237141508,5,FailOnTimeoutGroup] 2024-12-03T14:45:41,513 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,514 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T14:45:41,515 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,515 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:37236 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:39911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37236 dst: /127.0.0.1:39911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:41,516 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-03T14:45:41,522 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:41,523 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T14:45:41,524 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920 2024-12-03T14:45:41,526 DEBUG [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(2683): Master is not 
running yet 2024-12-03T14:45:41,526 DEBUG [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-03T14:45:41,526 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-03T14:45:41,526 WARN [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-03T14:45:41,526 WARN [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-03T14:45:41,527 WARN [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-03T14:45:41,530 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:41,530 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:41,535 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:34822 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:44445:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34822 dst: /127.0.0.1:44445 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:41,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-03T14:45:41,543 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
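
[Editor's note] The "reportForDuty failed; sleeping 100 ms and then retrying" warnings above show the region servers polling the master until it has finished starting. A schematic retry loop for the same pattern is sketched below; the registration check is a hypothetical stand-in for the RPC, and the attempt cap is illustrative only, while the 100 ms sleep matches the log.

    import java.util.concurrent.ThreadLocalRandom;

    public class ReportForDutyRetrySketch {
        public static void main(String[] args) throws InterruptedException {
            for (int attempt = 1; attempt <= 50; attempt++) {   // illustrative cap
                if (tryReportForDuty()) {
                    System.out.println("registered with master on attempt " + attempt);
                    return;
                }
                System.out.println("reportForDuty failed; sleeping 100 ms and then retrying.");
                Thread.sleep(100);   // fixed back-off, as in the log
            }
            System.out.println("giving up");
        }

        // Hypothetical stand-in for the RPC that fails while the master is not running yet.
        private static boolean tryReportForDuty() {
            return ThreadLocalRandom.current().nextInt(5) == 0;
        }
    }
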
2024-12-03T14:45:41,545 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:41,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T14:45:41,550 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T14:45:41,550 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:41,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:41,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T14:45:41,554 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T14:45:41,554 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:41,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:41,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T14:45:41,557 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T14:45:41,558 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:41,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:41,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T14:45:41,561 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T14:45:41,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:41,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:41,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T14:45:41,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740 2024-12-03T14:45:41,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740 2024-12-03T14:45:41,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-12-03T14:45:41,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T14:45:41,570 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T14:45:41,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T14:45:41,582 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T14:45:41,583 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64372895, jitterRate=-0.04076911509037018}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T14:45:41,587 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733237141545Initializing all the Stores at 1733237141547 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237141547Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237141547Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237141547Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237141547Cleaning up temporary data from old regions at 1733237141569 (+22 ms)Region opened successfully at 1733237141586 (+17 ms) 2024-12-03T14:45:41,587 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T14:45:41,587 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T14:45:41,587 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T14:45:41,587 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T14:45:41,587 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-03T14:45:41,589 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T14:45:41,589 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733237141587Disabling compacts and flushes for region at 1733237141587Disabling writes for close at 1733237141587Writing region close event to WAL at 1733237141588 (+1 ms)Closed at 1733237141589 (+1 ms) 2024-12-03T14:45:41,593 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T14:45:41,593 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T14:45:41,601 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T14:45:41,611 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T14:45:41,614 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T14:45:41,628 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(2659): reportForDuty to master=a5d22df9eca2,43511,1733237140004 with port=37335, startcode=1733237140672 2024-12-03T14:45:41,628 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(2659): reportForDuty to master=a5d22df9eca2,43511,1733237140004 with port=36997, startcode=1733237140546 2024-12-03T14:45:41,628 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(2659): reportForDuty to master=a5d22df9eca2,43511,1733237140004 with port=43899, startcode=1733237140626 2024-12-03T14:45:41,630 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:41,632 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43511 {}] master.ServerManager(517): Registering regionserver=a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:41,639 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:41,639 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43511 {}] master.ServerManager(517): Registering regionserver=a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:41,639 DEBUG [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920 2024-12-03T14:45:41,639 DEBUG [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37511 2024-12-03T14:45:41,639 DEBUG [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T14:45:41,641 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43511 {}] 
master.ServerManager(363): Checking decommissioned status of RegionServer a5d22df9eca2,37335,1733237140672 2024-12-03T14:45:41,642 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920 2024-12-03T14:45:41,642 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43511 {}] master.ServerManager(517): Registering regionserver=a5d22df9eca2,37335,1733237140672 2024-12-03T14:45:41,642 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37511 2024-12-03T14:45:41,642 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T14:45:41,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:41,645 DEBUG [RS:0;a5d22df9eca2:36997 {}] zookeeper.ZKUtil(111): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:41,645 WARN [RS:0;a5d22df9eca2:36997 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T14:45:41,645 INFO [RS:0;a5d22df9eca2:36997 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T14:45:41,645 DEBUG [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:41,645 DEBUG [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920 2024-12-03T14:45:41,645 DEBUG [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37511 2024-12-03T14:45:41,645 DEBUG [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T14:45:41,647 DEBUG [RS:1;a5d22df9eca2:43899 {}] zookeeper.ZKUtil(111): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:41,647 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a5d22df9eca2,36997,1733237140546] 2024-12-03T14:45:41,647 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a5d22df9eca2,43899,1733237140626] 2024-12-03T14:45:41,647 WARN [RS:1;a5d22df9eca2:43899 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
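
[Editor's note] The /hbase/rs children-changed events and "RegionServer ephemeral node created" lines above reflect each region server registering itself as an ephemeral znode, which ZooKeeper removes automatically if the server's session dies. A bare-bones sketch with the plain ZooKeeper client follows; it assumes the parent /hbase/rs znode already exists, copies the server name and quorum from the log, and omits ACL handling and error recovery.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistrationSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:55238", 30_000, event -> { });

            // Ephemeral: ZooKeeper deletes the node when this session expires, which is
            // what lets the master's RegionServerTracker notice a crashed server.
            String path = zk.create("/hbase/rs/a5d22df9eca2,36997,1733237140546",
                new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            System.out.println("registered at " + path);

            Thread.sleep(5_000);   // stay alive briefly; closing the session removes the node
            zk.close();
        }
    }
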
2024-12-03T14:45:41,647 INFO [RS:1;a5d22df9eca2:43899 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T14:45:41,647 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:41,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:41,648 DEBUG [RS:2;a5d22df9eca2:37335 {}] zookeeper.ZKUtil(111): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a5d22df9eca2,37335,1733237140672 2024-12-03T14:45:41,648 WARN [RS:2;a5d22df9eca2:37335 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T14:45:41,648 INFO [RS:2;a5d22df9eca2:37335 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T14:45:41,649 DEBUG [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,37335,1733237140672 2024-12-03T14:45:41,649 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a5d22df9eca2,37335,1733237140672] 2024-12-03T14:45:41,673 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T14:45:41,673 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T14:45:41,673 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T14:45:41,688 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T14:45:41,688 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T14:45:41,688 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T14:45:41,694 INFO [RS:2;a5d22df9eca2:37335 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T14:45:41,694 INFO [RS:1;a5d22df9eca2:43899 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T14:45:41,694 INFO [RS:0;a5d22df9eca2:36997 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T14:45:41,694 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, 
unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,694 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,694 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,695 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T14:45:41,695 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T14:45:41,695 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T14:45:41,701 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T14:45:41,701 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T14:45:41,701 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T14:45:41,702 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,702 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,702 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,703 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG 
[RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,703 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a5d22df9eca2:0, corePoolSize=2, maxPoolSize=2 2024-12-03T14:45:41,703 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a5d22df9eca2:0, corePoolSize=2, maxPoolSize=2 2024-12-03T14:45:41,703 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a5d22df9eca2:0, corePoolSize=2, maxPoolSize=2 2024-12-03T14:45:41,704 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a5d22df9eca2:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:41,704 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:41,704 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:41,704 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:41,704 DEBUG [RS:1;a5d22df9eca2:43899 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:41,705 DEBUG [RS:2;a5d22df9eca2:37335 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:41,705 DEBUG [RS:0;a5d22df9eca2:36997 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:41,709 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,709 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
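
[Editor's note] The executor.ExecutorService lines above record one bounded thread pool per operation type, each with its own corePoolSize/maxPoolSize. The sketch below shows a plain java.util.concurrent equivalent of one such pool; the pool name and sizes echo the RS_OPEN_REGION entries in the log, while the queue choice and the submitted task are illustrative.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RegionOpenPoolSketch {
        public static void main(String[] args) {
            // RS_OPEN_REGION-style pool: corePoolSize=1, maxPoolSize=1, i.e. strictly serial opens.
            ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
                1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
                r -> new Thread(r, "RS_OPEN_REGION-regionserver/example:0"));

            for (int i = 0; i < 3; i++) {
                int region = i;
                openRegionPool.submit(() -> System.out.println("opening region " + region));
            }
            openRegionPool.shutdown();
        }
    }
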
2024-12-03T14:45:41,710 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,37335,1733237140672-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,36997,1733237140546-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T14:45:41,710 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,711 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,43899,1733237140626-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T14:45:41,727 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T14:45:41,729 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,43899,1733237140626-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,730 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
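The ChoreService(168) entries above record periodic background tasks (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, nonceCleaner, BrokenStoreFileCleaner, MobFileCleanerChore) being scheduled with a name, a period and a time unit. As a minimal sketch of that pattern, assuming the ScheduledChore constructor that takes (name, stopper, period, initialDelay, unit) as in recent HBase releases, the chore below fires every 1000 ms; the chore name and the inline Stoppable are illustrative and do not come from this log.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can be cancelled, mirroring how region
    // server chores are tied to the server's lifecycle.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // "ExampleChecker" runs every 1000 ms, the same period/unit shape as the
    // CompactionChecker and MemstoreFlusherChore entries above.
    ScheduledChore chore =
        new ScheduledChore("ExampleChecker", stopper, 1000, 0, TimeUnit.MILLISECONDS) {
          @Override protected void chore() {
            System.out.println("chore fired");
          }
        };

    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore);   // ChoreService itself logs "... is enabled." when scheduled

    Thread.sleep(3500);             // let it fire a few times
    stopper.stop("done");
    service.shutdown();
  }
}

When scheduleChore succeeds, ChoreService prints the same kind of "Chore ScheduledChore name=..., period=..., unit=... is enabled." line seen above; note that ScheduledChore and ChoreService are internal HBase classes, so the exact signatures may differ in the 4.0.0-alpha-1-SNAPSHOT build used here.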
2024-12-03T14:45:41,730 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.Replication(171): a5d22df9eca2,43899,1733237140626 started 2024-12-03T14:45:41,732 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T14:45:41,732 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T14:45:41,732 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,37335,1733237140672-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,732 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,36997,1733237140546-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,733 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,733 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.Replication(171): a5d22df9eca2,36997,1733237140546 started 2024-12-03T14:45:41,733 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,733 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.Replication(171): a5d22df9eca2,37335,1733237140672 started 2024-12-03T14:45:41,749 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,749 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1482): Serving as a5d22df9eca2,43899,1733237140626, RpcServer on a5d22df9eca2/172.17.0.2:43899, sessionid=0x100a08b43c40002 2024-12-03T14:45:41,750 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T14:45:41,750 DEBUG [RS:1;a5d22df9eca2:43899 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:41,751 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,43899,1733237140626' 2024-12-03T14:45:41,751 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T14:45:41,752 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T14:45:41,753 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T14:45:41,753 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T14:45:41,753 DEBUG [RS:1;a5d22df9eca2:43899 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:41,753 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,43899,1733237140626' 2024-12-03T14:45:41,753 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T14:45:41,754 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/online-snapshot/acquired' 2024-12-03T14:45:41,754 DEBUG [RS:1;a5d22df9eca2:43899 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T14:45:41,754 INFO [RS:1;a5d22df9eca2:43899 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T14:45:41,754 INFO [RS:1;a5d22df9eca2:43899 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T14:45:41,755 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,755 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:41,755 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(1482): Serving as a5d22df9eca2,37335,1733237140672, RpcServer on a5d22df9eca2/172.17.0.2:37335, sessionid=0x100a08b43c40003 2024-12-03T14:45:41,755 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(1482): Serving as a5d22df9eca2,36997,1733237140546, RpcServer on a5d22df9eca2/172.17.0.2:36997, sessionid=0x100a08b43c40001 2024-12-03T14:45:41,756 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T14:45:41,756 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T14:45:41,756 DEBUG [RS:2;a5d22df9eca2:37335 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a5d22df9eca2,37335,1733237140672 2024-12-03T14:45:41,756 DEBUG [RS:0;a5d22df9eca2:36997 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:41,756 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,37335,1733237140672' 2024-12-03T14:45:41,756 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,36997,1733237140546' 2024-12-03T14:45:41,756 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T14:45:41,756 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T14:45:41,757 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T14:45:41,757 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T14:45:41,758 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T14:45:41,758 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T14:45:41,758 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T14:45:41,758 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T14:45:41,758 DEBUG [RS:2;a5d22df9eca2:37335 {}] 
snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a5d22df9eca2,37335,1733237140672 2024-12-03T14:45:41,758 DEBUG [RS:0;a5d22df9eca2:36997 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:41,758 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,37335,1733237140672' 2024-12-03T14:45:41,758 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,36997,1733237140546' 2024-12-03T14:45:41,758 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T14:45:41,758 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T14:45:41,759 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T14:45:41,759 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T14:45:41,759 DEBUG [RS:2;a5d22df9eca2:37335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T14:45:41,759 DEBUG [RS:0;a5d22df9eca2:36997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T14:45:41,759 INFO [RS:2;a5d22df9eca2:37335 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T14:45:41,759 INFO [RS:0;a5d22df9eca2:36997 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T14:45:41,759 INFO [RS:2;a5d22df9eca2:37335 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T14:45:41,759 INFO [RS:0;a5d22df9eca2:36997 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T14:45:41,765 WARN [a5d22df9eca2:43511 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
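The procedure entries above show each region server joining the flush-table-proc and online-snapshot coordination as a ZK procedure member, checking the abort znode and watching the acquired znode under /hbase. A small sketch of inspecting those znodes with the plain ZooKeeper client follows; the quorum address 127.0.0.1:55238 is the one printed by the ZKWatcher entries further down in this log, and the session timeout and no-op watcher are arbitrary choices for the example.

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ZkProcedureZnodes {
  public static void main(String[] args) throws Exception {
    // Connect to the mini-cluster quorum seen in the ZKWatcher entries of this log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55238", 30000, event -> { });
    try {
      // Same znodes the region servers report checking at startup.
      for (String path : new String[] {
          "/hbase/flush-table-proc/acquired", "/hbase/flush-table-proc/abort",
          "/hbase/online-snapshot/acquired", "/hbase/online-snapshot/abort" }) {
        List<String> children = zk.getChildren(path, false);
        System.out.println(path + " -> " + children);  // empty while no procedure is running
      }
    } finally {
      zk.close();
    }
  }
}

While no flush or snapshot procedure is in flight, both the acquired and abort znodes are expected to have no children, which is consistent with the startup checks logged above.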
2024-12-03T14:45:41,860 INFO [RS:1;a5d22df9eca2:43899 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T14:45:41,860 INFO [RS:0;a5d22df9eca2:36997 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T14:45:41,860 INFO [RS:2;a5d22df9eca2:37335 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T14:45:41,863 INFO [RS:0;a5d22df9eca2:36997 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C36997%2C1733237140546, suffix=, logDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,36997,1733237140546, archiveDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/oldWALs, maxLogs=32 2024-12-03T14:45:41,864 INFO [RS:2;a5d22df9eca2:37335 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C37335%2C1733237140672, suffix=, logDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,37335,1733237140672, archiveDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/oldWALs, maxLogs=32 2024-12-03T14:45:41,864 INFO [RS:1;a5d22df9eca2:43899 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C43899%2C1733237140626, suffix=, logDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,43899,1733237140626, archiveDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/oldWALs, maxLogs=32 2024-12-03T14:45:41,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-03T14:45:41,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-03T14:45:41,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-03T14:45:41,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-03T14:45:41,917 DEBUG [RS:2;a5d22df9eca2:37335 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,37335,1733237140672/a5d22df9eca2%2C37335%2C1733237140672.1733237141867, exclude list is [], retry=0 2024-12-03T14:45:41,918 DEBUG [RS:1;a5d22df9eca2:43899 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,43899,1733237140626/a5d22df9eca2%2C43899%2C1733237140626.1733237141867, exclude list is [], retry=0 2024-12-03T14:45:41,918 DEBUG [RS:0;a5d22df9eca2:36997 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,36997,1733237140546/a5d22df9eca2%2C36997%2C1733237140546.1733237141867, exclude list is [], retry=0 2024-12-03T14:45:41,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake 
in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44665,DS-f35aca29-109e-4999-b0d1-5548398dfdee,DISK] 2024-12-03T14:45:41,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44445,DS-1a800fb1-2cb2-41f4-a2d9-916fd8977e11,DISK] 2024-12-03T14:45:41,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39911,DS-b9b5b26b-9ccb-4a26-8fbf-b6669cecbb62,DISK] 2024-12-03T14:45:41,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44445,DS-1a800fb1-2cb2-41f4-a2d9-916fd8977e11,DISK] 2024-12-03T14:45:41,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44665,DS-f35aca29-109e-4999-b0d1-5548398dfdee,DISK] 2024-12-03T14:45:41,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44665,DS-f35aca29-109e-4999-b0d1-5548398dfdee,DISK] 2024-12-03T14:45:41,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39911,DS-b9b5b26b-9ccb-4a26-8fbf-b6669cecbb62,DISK] 2024-12-03T14:45:41,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44445,DS-1a800fb1-2cb2-41f4-a2d9-916fd8977e11,DISK] 2024-12-03T14:45:41,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39911,DS-b9b5b26b-9ccb-4a26-8fbf-b6669cecbb62,DISK] 2024-12-03T14:45:41,929 INFO [RS:2;a5d22df9eca2:37335 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,37335,1733237140672/a5d22df9eca2%2C37335%2C1733237140672.1733237141867 2024-12-03T14:45:41,930 DEBUG [RS:2;a5d22df9eca2:37335 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43401:43401),(127.0.0.1/127.0.0.1:40167:40167),(127.0.0.1/127.0.0.1:43505:43505)] 2024-12-03T14:45:41,931 INFO [RS:1;a5d22df9eca2:43899 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,43899,1733237140626/a5d22df9eca2%2C43899%2C1733237140626.1733237141867 2024-12-03T14:45:41,931 INFO [RS:0;a5d22df9eca2:36997 {}] wal.AbstractFSWAL(991): 
New WAL /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,36997,1733237140546/a5d22df9eca2%2C36997%2C1733237140546.1733237141867 2024-12-03T14:45:41,931 DEBUG [RS:1;a5d22df9eca2:43899 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43505:43505),(127.0.0.1/127.0.0.1:40167:40167),(127.0.0.1/127.0.0.1:43401:43401)] 2024-12-03T14:45:41,936 DEBUG [RS:0;a5d22df9eca2:36997 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43401:43401),(127.0.0.1/127.0.0.1:40167:40167),(127.0.0.1/127.0.0.1:43505:43505)] 2024-12-03T14:45:41,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-03T14:45:41,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-03T14:45:41,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-03T14:45:41,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-03T14:45:42,019 DEBUG [a5d22df9eca2:43511 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-03T14:45:42,032 DEBUG [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(204): Hosts are {a5d22df9eca2=0} racks are {/default-rack=0} 2024-12-03T14:45:42,038 DEBUG [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T14:45:42,038 DEBUG [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T14:45:42,038 DEBUG [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T14:45:42,038 DEBUG [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T14:45:42,038 DEBUG [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T14:45:42,038 DEBUG [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T14:45:42,038 INFO [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T14:45:42,038 INFO [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T14:45:42,038 INFO [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T14:45:42,039 DEBUG [a5d22df9eca2:43511 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T14:45:42,044 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:42,051 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a5d22df9eca2,43899,1733237140626, state=OPENING 2024-12-03T14:45:42,055 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T14:45:42,056 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-03T14:45:42,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:42,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:42,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:42,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:42,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:42,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:42,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:42,058 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T14:45:42,060 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a5d22df9eca2,43899,1733237140626}] 2024-12-03T14:45:42,239 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T14:45:42,241 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33093, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T14:45:42,254 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T14:45:42,255 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T14:45:42,255 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-03T14:45:42,259 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C43899%2C1733237140626.meta, suffix=.meta, logDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,43899,1733237140626, archiveDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/oldWALs, maxLogs=32 2024-12-03T14:45:42,273 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,43899,1733237140626/a5d22df9eca2%2C43899%2C1733237140626.meta.1733237142261.meta, exclude list is [], retry=0 2024-12-03T14:45:42,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44665,DS-f35aca29-109e-4999-b0d1-5548398dfdee,DISK] 2024-12-03T14:45:42,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39911,DS-b9b5b26b-9ccb-4a26-8fbf-b6669cecbb62,DISK] 2024-12-03T14:45:42,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44445,DS-1a800fb1-2cb2-41f4-a2d9-916fd8977e11,DISK] 2024-12-03T14:45:42,280 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/WALs/a5d22df9eca2,43899,1733237140626/a5d22df9eca2%2C43899%2C1733237140626.meta.1733237142261.meta 2024-12-03T14:45:42,281 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43401:43401),(127.0.0.1/127.0.0.1:40167:40167),(127.0.0.1/127.0.0.1:43505:43505)] 2024-12-03T14:45:42,281 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T14:45:42,283 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T14:45:42,286 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T14:45:42,290 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
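The wal.AbstractFSWAL(613) entries above report the effective WAL settings for each region server and for the meta WAL (blocksize=256 MB, rollsize=128 MB, maxLogs=32), and WALFactory(196) shows the AsyncFSWALProvider being instantiated. The sketch below maps those numbers onto the configuration keys that usually control them; the key names are not printed in this log, so treat the mapping as an assumption to verify against the 4.0.0-alpha-1-SNAPSHOT version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // WAL block size; the entries above report blocksize=256 MB.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);

    // Roll size is blocksize times this multiplier; 256 MB * 0.5 gives the
    // rollsize=128 MB logged above.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);

    // Upper bound on WAL files kept per region server before flushes are forced;
    // matches maxLogs=32 above.
    conf.setInt("hbase.regionserver.maxlogs", 32);

    // The async WAL provider named in the WALFactory entry above.
    conf.set("hbase.wal.provider", "asyncfs");

    System.out.println("maxlogs = " + conf.getInt("hbase.regionserver.maxlogs", -1));
  }
}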
2024-12-03T14:45:42,294 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T14:45:42,294 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:42,295 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T14:45:42,295 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T14:45:42,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T14:45:42,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T14:45:42,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:42,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:42,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T14:45:42,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T14:45:42,301 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:42,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:42,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T14:45:42,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T14:45:42,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:42,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:42,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T14:45:42,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T14:45:42,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:42,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
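The compactions.CompactionConfiguration(183) lines above dump the effective compaction tuning for each column family of hbase:meta: minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, major period 604800000 with jitter 0.5. A hedged sketch of the hbase-site.xml keys that normally produce those values follows; the key names are assumptions in the sense that the log does not print them, so confirm them against the running version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Files below this size are always eligible for minor compaction (128 MB above).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);

    // Lower/upper bounds on files per minor compaction (3 and 10 above).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    // Selection ratios: 1.2 normally, 5.0 during configured off-peak hours.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

    // Compactions above this many bytes go to the "large" compaction pool
    // (2684354560 above, i.e. 2.5 GiB).
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2_684_354_560L);

    // Major compaction every 7 days (604800000 ms) with +/- 50% jitter.
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);

    System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
  }
}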
2024-12-03T14:45:42,307 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T14:45:42,309 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740 2024-12-03T14:45:42,311 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740 2024-12-03T14:45:42,313 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T14:45:42,313 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T14:45:42,314 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T14:45:42,316 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T14:45:42,317 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67467859, jitterRate=0.005349442362785339}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T14:45:42,318 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T14:45:42,319 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733237142295Writing region info on filesystem at 1733237142295Initializing all the Stores at 1733237142297 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237142297Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237142297Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237142297Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237142297Cleaning up temporary data from old regions at 1733237142313 (+16 ms)Running coprocessor post-open hooks at 1733237142318 (+5 ms)Region opened successfully at 1733237142319 (+1 ms) 2024-12-03T14:45:42,327 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733237142231 2024-12-03T14:45:42,338 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T14:45:42,338 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T14:45:42,339 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:42,342 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a5d22df9eca2,43899,1733237140626, state=OPEN 2024-12-03T14:45:42,344 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T14:45:42,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T14:45:42,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T14:45:42,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T14:45:42,344 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:42,344 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:42,344 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:42,344 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:42,344 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:42,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T14:45:42,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a5d22df9eca2,43899,1733237140626 in 285 msec 2024-12-03T14:45:42,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T14:45:42,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 751 msec 2024-12-03T14:45:42,358 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T14:45:42,358 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T14:45:42,378 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T14:45:42,379 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a5d22df9eca2,43899,1733237140626, seqNum=-1] 2024-12-03T14:45:42,398 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T14:45:42,400 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35361, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T14:45:42,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0350 sec 2024-12-03T14:45:42,448 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733237142448, completionTime=-1 2024-12-03T14:45:42,452 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-03T14:45:42,452 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-03T14:45:42,483 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-03T14:45:42,483 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733237202483 2024-12-03T14:45:42,484 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733237262483 2024-12-03T14:45:42,484 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 31 msec 2024-12-03T14:45:42,485 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-03T14:45:42,491 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,43511,1733237140004-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:42,491 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,43511,1733237140004-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:42,491 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,43511,1733237140004-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:42,493 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a5d22df9eca2:43511, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:42,493 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:42,495 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:42,500 DEBUG [master/a5d22df9eca2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T14:45:42,519 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.772sec 2024-12-03T14:45:42,520 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T14:45:42,521 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T14:45:42,522 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T14:45:42,522 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-03T14:45:42,523 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T14:45:42,523 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,43511,1733237140004-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T14:45:42,524 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,43511,1733237140004-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T14:45:42,528 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T14:45:42,529 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T14:45:42,529 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,43511,1733237140004-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:42,615 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72d63dcf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T14:45:42,619 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-03T14:45:42,620 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-03T14:45:42,623 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a5d22df9eca2,43511,-1 for getting cluster id 2024-12-03T14:45:42,626 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T14:45:42,633 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6d1ec690-d930-4976-b6c6-286627144007' 2024-12-03T14:45:42,636 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T14:45:42,636 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6d1ec690-d930-4976-b6c6-286627144007" 2024-12-03T14:45:42,636 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2550a746, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T14:45:42,636 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a5d22df9eca2,43511,-1] 2024-12-03T14:45:42,639 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T14:45:42,640 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:42,642 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50240, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
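The ClusterIdFetcher and ConnectionRegistry entries above show the test client resolving the cluster id from the active master and then fetching the hbase:meta location (hostname=a5d22df9eca2,43899). A rough client-side equivalent using the public connection API is sketched below; the ZooKeeper quorum values are taken from the ZKWatcher lines earlier in this log, and the lookup row is arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum as it appears in the ZKWatcher entries (127.0.0.1:55238).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "55238");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Same lookup the log reports as "The fetched meta region location is
      // [region=hbase:meta,,1.1588230740, hostname=a5d22df9eca2,43899,...]".
      HRegionLocation meta = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println("meta is on " + meta.getServerName());
    }
  }
}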
2024-12-03T14:45:42,645 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4253f156, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T14:45:42,646 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T14:45:42,654 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a5d22df9eca2,43899,1733237140626, seqNum=-1] 2024-12-03T14:45:42,654 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T14:45:42,657 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60654, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T14:45:42,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a5d22df9eca2,43511,1733237140004 2024-12-03T14:45:42,681 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T14:45:42,686 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is a5d22df9eca2,43511,1733237140004 2024-12-03T14:45:42,688 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@24a81538 2024-12-03T14:45:42,689 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T14:45:42,692 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50256, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T14:45:42,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T14:45:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-03T14:45:42,710 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T14:45:42,712 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-03T14:45:42,712 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T14:45:42,714 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-03T14:45:42,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-03T14:45:42,731 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T14:45:42,732 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-03T14:45:42,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:34906 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:44445:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34906 dst: /127.0.0.1:44445
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T14:45:42,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775680_1021 (size=392)
2024-12-03T14:45:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-03T14:45:43,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-03T14:45:43,151 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
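The DFSStripedOutputStream warnings above are the crux of this TestHBaseWalOnEC run: the test data directory uses the RS-3-2-1024k erasure coding policy, which needs five datanodes (three data plus two parity blocks per block group), while this mini-cluster only has three, so the two parity blocks cannot be placed and the block group is written at reduced durability. The log itself suggests 'hdfs ec -verifyClusterSetup' for the CLI check; a programmatic equivalent using the DistributedFileSystem erasure coding API is sketched below, with the namenode address and path taken from the hdfs://localhost:37511/user/jenkins/test-data/... paths in this log and everything else assumed.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    // NameNode address as it appears in the hdfs://localhost:37511/... paths above.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:37511"), conf);

    Path dataDir = new Path("/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920");

    // Which EC policy, if any, applies to the test data directory.
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dataDir);
    if (policy == null) {
      System.out.println(dataDir + " is replicated, not erasure coded");
    } else {
      // RS-3-2-1024k means 3 data units + 2 parity units per block group,
      // so full placement needs at least 5 datanodes/racks.
      int unitsNeeded = policy.getNumDataUnits() + policy.getNumParityUnits();
      System.out.println(dataDir + " uses " + policy.getName()
          + ", needs >= " + unitsNeeded + " datanodes for full placement");
    }
  }
}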
2024-12-03T14:45:43,153 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f96aedf855bc271d0eb215c591d95ea8, NAME => 'TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920 2024-12-03T14:45:43,160 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:43,160 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:43,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:37320 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:39911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37320 dst: /127.0.0.1:39911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:43,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-03T14:45:43,174 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-03T14:45:43,175 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:43,175 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing f96aedf855bc271d0eb215c591d95ea8, disabling compactions & flushes 2024-12-03T14:45:43,175 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:43,175 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:43,175 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. after waiting 0 ms 2024-12-03T14:45:43,175 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:43,175 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:43,176 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for f96aedf855bc271d0eb215c591d95ea8: Waiting for close lock at 1733237143175Disabling compacts and flushes for region at 1733237143175Disabling writes for close at 1733237143175Writing region close event to WAL at 1733237143175Closed at 1733237143175 2024-12-03T14:45:43,178 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T14:45:43,182 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733237143178"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733237143178"}]},"ts":"1733237143178"} 2024-12-03T14:45:43,187 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T14:45:43,189 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T14:45:43,192 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733237143189"}]},"ts":"1733237143189"} 2024-12-03T14:45:43,197 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-03T14:45:43,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {a5d22df9eca2=0} racks are {/default-rack=0} 2024-12-03T14:45:43,199 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T14:45:43,199 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T14:45:43,199 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T14:45:43,199 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T14:45:43,199 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T14:45:43,199 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T14:45:43,199 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T14:45:43,199 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T14:45:43,199 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T14:45:43,199 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T14:45:43,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f96aedf855bc271d0eb215c591d95ea8, ASSIGN}] 2024-12-03T14:45:43,203 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f96aedf855bc271d0eb215c591d95ea8, ASSIGN 2024-12-03T14:45:43,205 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f96aedf855bc271d0eb215c591d95ea8, ASSIGN; state=OFFLINE, location=a5d22df9eca2,36997,1733237140546; forceNewPlan=false, retain=false 2024-12-03T14:45:43,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T14:45:43,357 INFO [a5d22df9eca2:43511 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-03T14:45:43,358 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f96aedf855bc271d0eb215c591d95ea8, regionState=OPENING, regionLocation=a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:43,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f96aedf855bc271d0eb215c591d95ea8, ASSIGN because future has completed 2024-12-03T14:45:43,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f96aedf855bc271d0eb215c591d95ea8, server=a5d22df9eca2,36997,1733237140546}] 2024-12-03T14:45:43,519 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T14:45:43,521 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50809, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T14:45:43,527 INFO [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:43,528 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f96aedf855bc271d0eb215c591d95ea8, NAME => 'TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8.', STARTKEY => '', ENDKEY => ''} 2024-12-03T14:45:43,528 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,528 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:43,528 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,528 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,531 INFO [StoreOpener-f96aedf855bc271d0eb215c591d95ea8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,533 INFO [StoreOpener-f96aedf855bc271d0eb215c591d95ea8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f96aedf855bc271d0eb215c591d95ea8 columnFamilyName cf 2024-12-03T14:45:43,533 DEBUG [StoreOpener-f96aedf855bc271d0eb215c591d95ea8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:43,534 INFO [StoreOpener-f96aedf855bc271d0eb215c591d95ea8-1 {}] regionserver.HStore(327): Store=f96aedf855bc271d0eb215c591d95ea8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T14:45:43,535 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,536 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/default/TestHBaseWalOnEC/f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,536 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/default/TestHBaseWalOnEC/f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,537 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,537 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,540 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,545 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/default/TestHBaseWalOnEC/f96aedf855bc271d0eb215c591d95ea8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T14:45:43,546 INFO [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f96aedf855bc271d0eb215c591d95ea8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73928841, jitterRate=0.10162557661533356}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T14:45:43,546 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:43,547 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f96aedf855bc271d0eb215c591d95ea8: Running coprocessor pre-open hook at 
1733237143529Writing region info on filesystem at 1733237143529Initializing all the Stores at 1733237143530 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237143530Cleaning up temporary data from old regions at 1733237143537 (+7 ms)Running coprocessor post-open hooks at 1733237143547 (+10 ms)Region opened successfully at 1733237143547 2024-12-03T14:45:43,549 INFO [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8., pid=6, masterSystemTime=1733237143518 2024-12-03T14:45:43,553 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:43,553 INFO [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:43,554 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f96aedf855bc271d0eb215c591d95ea8, regionState=OPEN, openSeqNum=2, regionLocation=a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:43,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f96aedf855bc271d0eb215c591d95ea8, server=a5d22df9eca2,36997,1733237140546 because future has completed 2024-12-03T14:45:43,566 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T14:45:43,566 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f96aedf855bc271d0eb215c591d95ea8, server=a5d22df9eca2,36997,1733237140546 in 197 msec 2024-12-03T14:45:43,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T14:45:43,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f96aedf855bc271d0eb215c591d95ea8, ASSIGN in 365 msec 2024-12-03T14:45:43,573 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T14:45:43,574 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733237143574"}]},"ts":"1733237143574"} 2024-12-03T14:45:43,578 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-03T14:45:43,579 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 
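The CreateTableProcedure above (pid=4) steps through CREATE_TABLE_PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE and POST_OPERATION for a single-region table with one 'cf' family. On the client side that whole sequence is driven by one Admin.createTable call; the sketch below shows roughly what such a call looks like with the public HBase client API. It is illustrative only: the connection setup is generic and the descriptor simply relies on the column-family defaults the log prints (VERSIONS=1, no compression, 64 KB blocks).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestHBaseWalOnECTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // One column family 'cf', default settings as shown in the create request log line.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // Submits the request the HMaster executes as CreateTableProcedure (pid=4).
      admin.createTable(desc);
    }
  }
}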
2024-12-03T14:45:43,583 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 878 msec 2024-12-03T14:45:43,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T14:45:43,861 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-03T14:45:43,861 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-03T14:45:43,862 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T14:45:43,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-03T14:45:43,869 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T14:45:43,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-03T14:45:43,879 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8., hostname=a5d22df9eca2,36997,1733237140546, seqNum=2] 2024-12-03T14:45:43,881 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T14:45:43,884 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41210, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T14:45:43,894 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-03T14:45:43,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-03T14:45:43,902 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-03T14:45:43,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T14:45:43,905 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T14:45:43,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T14:45:44,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T14:45:44,068 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-03T14:45:44,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:44,073 INFO [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing f96aedf855bc271d0eb215c591d95ea8 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-03T14:45:44,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/default/TestHBaseWalOnEC/f96aedf855bc271d0eb215c591d95ea8/.tmp/cf/a60c4188b550413db1af63491fe67f08 is 36, key is row/cf:cq/1733237143885/Put/seqid=0 2024-12-03T14:45:44,148 WARN [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:44,148 WARN [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:44,155 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_451089443_22 at /127.0.0.1:45470 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44665:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45470 dst: /127.0.0.1:44665 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T14:45:44,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-03T14:45:44,165 WARN [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:44,165 INFO [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/default/TestHBaseWalOnEC/f96aedf855bc271d0eb215c591d95ea8/.tmp/cf/a60c4188b550413db1af63491fe67f08 2024-12-03T14:45:44,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T14:45:44,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/default/TestHBaseWalOnEC/f96aedf855bc271d0eb215c591d95ea8/.tmp/cf/a60c4188b550413db1af63491fe67f08 as hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/default/TestHBaseWalOnEC/f96aedf855bc271d0eb215c591d95ea8/cf/a60c4188b550413db1af63491fe67f08 2024-12-03T14:45:44,233 INFO [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/default/TestHBaseWalOnEC/f96aedf855bc271d0eb215c591d95ea8/cf/a60c4188b550413db1af63491fe67f08, entries=1, sequenceid=5, filesize=4.7 K 2024-12-03T14:45:44,242 INFO [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for f96aedf855bc271d0eb215c591d95ea8 in 168ms, sequenceid=5, compaction requested=false 2024-12-03T14:45:44,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-03T14:45:44,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for f96aedf855bc271d0eb215c591d95ea8: 2024-12-03T14:45:44,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 
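The flush above (FlushTableProcedure pid=7 with its FlushRegionProcedure subtask pid=8) writes a single 32-byte cell, keyed row/cf:cq per the HFileWriterImpl line, out of the memstore into the HFile a60c4188b550413db1af63491fe67f08 (~4.7 K). A hedged sketch of the client-side put-then-flush that produces such a flush follows; the cell value is illustrative, since the log only records the row and column coordinates.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushTestTable {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // Single cell at row/cf:cq, matching the key printed by HFileWriterImpl.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the table flush observed as pid=7/pid=8 in the procedure log.
      admin.flush(name);
    }
  }
}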
2024-12-03T14:45:44,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-03T14:45:44,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-03T14:45:44,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-03T14:45:44,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 345 msec 2024-12-03T14:45:44,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 361 msec 2024-12-03T14:45:44,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T14:45:44,530 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-03T14:45:44,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T14:45:44,545 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T14:45:44,545 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:44,551 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:44,551 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:44,551 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T14:45:44,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T14:45:44,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1469019133, stopped=false 2024-12-03T14:45:44,552 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a5d22df9eca2,43511,1733237140004 2024-12-03T14:45:44,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:44,553 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:44,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:44,554 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:44,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:44,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, 
quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:44,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:44,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:44,554 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T14:45:44,554 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:44,554 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:44,554 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:44,554 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T14:45:44,555 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:44,555 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:44,555 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:44,555 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a5d22df9eca2,36997,1733237140546' ***** 2024-12-03T14:45:44,555 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T14:45:44,555 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a5d22df9eca2,43899,1733237140626' ***** 2024-12-03T14:45:44,555 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T14:45:44,555 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a5d22df9eca2,37335,1733237140672' ***** 2024-12-03T14:45:44,555 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T14:45:44,555 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T14:45:44,556 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T14:45:44,556 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T14:45:44,557 INFO [RS:0;a5d22df9eca2:36997 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T14:45:44,557 INFO [RS:2;a5d22df9eca2:37335 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
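The two AsyncConnectionImpl call stacks above show the path that starts this shutdown: TestHBaseWalOnEC.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection, asks the master to shut down, and each region server then stops as logged below. A hedged sketch of what such a JUnit teardown hook looks like; the class skeleton and field name are assumptions, and only the HBaseTestingUtil.shutdownMiniCluster call is taken from the stack trace.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class ExampleWalOnECTest {
  // Assumed field name; the real test keeps its own HBaseTestingUtil instance.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Shuts down the HBase minicluster (master plus region servers) and the
    // backing mini DFS/ZooKeeper clusters, producing the STOPPING lines above.
    UTIL.shutdownMiniCluster();
  }
}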
2024-12-03T14:45:44,557 INFO [RS:1;a5d22df9eca2:43899 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T14:45:44,557 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T14:45:44,557 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T14:45:44,557 INFO [RS:1;a5d22df9eca2:43899 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T14:45:44,557 INFO [RS:0;a5d22df9eca2:36997 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T14:45:44,557 INFO [RS:2;a5d22df9eca2:37335 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T14:45:44,557 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(959): stopping server a5d22df9eca2,37335,1733237140672 2024-12-03T14:45:44,557 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(959): stopping server a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:44,557 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T14:45:44,557 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T14:45:44,557 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T14:45:44,557 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(3091): Received CLOSE for f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:44,557 INFO [RS:2;a5d22df9eca2:37335 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;a5d22df9eca2:37335. 2024-12-03T14:45:44,557 INFO [RS:1;a5d22df9eca2:43899 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a5d22df9eca2:43899. 
2024-12-03T14:45:44,558 DEBUG [RS:2;a5d22df9eca2:37335 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:44,558 DEBUG [RS:1;a5d22df9eca2:43899 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:44,558 DEBUG [RS:2;a5d22df9eca2:37335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:44,558 DEBUG [RS:1;a5d22df9eca2:43899 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:44,558 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(976): stopping server a5d22df9eca2,37335,1733237140672; all regions closed. 
2024-12-03T14:45:44,558 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(959): stopping server a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:44,558 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T14:45:44,558 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T14:45:44,558 INFO [RS:0;a5d22df9eca2:36997 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a5d22df9eca2:36997. 2024-12-03T14:45:44,558 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T14:45:44,558 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T14:45:44,558 DEBUG [RS:0;a5d22df9eca2:36997 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:44,558 DEBUG [RS:0;a5d22df9eca2:36997 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:44,558 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T14:45:44,558 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f96aedf855bc271d0eb215c591d95ea8, disabling compactions & flushes 2024-12-03T14:45:44,558 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T14:45:44,559 INFO [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:44,559 DEBUG [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(1325): Online Regions={f96aedf855bc271d0eb215c591d95ea8=TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8.} 2024-12-03T14:45:44,559 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 
2024-12-03T14:45:44,559 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. after waiting 0 ms 2024-12-03T14:45:44,559 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T14:45:44,559 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:44,559 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-03T14:45:44,559 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T14:45:44,559 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T14:45:44,559 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T14:45:44,559 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T14:45:44,559 DEBUG [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(1351): Waiting on f96aedf855bc271d0eb215c591d95ea8 2024-12-03T14:45:44,559 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T14:45:44,559 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T14:45:44,559 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-03T14:45:44,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_1073741826_1016 (size=93) 2024-12-03T14:45:44,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741826_1016 (size=93) 2024-12-03T14:45:44,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_1073741826_1016 (size=93) 2024-12-03T14:45:44,579 DEBUG [RS:2;a5d22df9eca2:37335 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/oldWALs 2024-12-03T14:45:44,580 INFO [RS:2;a5d22df9eca2:37335 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a5d22df9eca2%2C37335%2C1733237140672:(num 1733237141867) 2024-12-03T14:45:44,580 DEBUG [RS:2;a5d22df9eca2:37335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:44,580 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:44,580 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T14:45:44,580 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.ChoreService(370): Chore service for: regionserver/a5d22df9eca2:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, 
ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T14:45:44,581 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T14:45:44,581 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T14:45:44,581 INFO [regionserver/a5d22df9eca2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T14:45:44,581 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T14:45:44,581 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/default/TestHBaseWalOnEC/f96aedf855bc271d0eb215c591d95ea8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T14:45:44,581 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T14:45:44,581 INFO [RS:2;a5d22df9eca2:37335 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37335 2024-12-03T14:45:44,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a5d22df9eca2,37335,1733237140672 2024-12-03T14:45:44,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:44,585 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T14:45:44,593 INFO [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 2024-12-03T14:45:44,594 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f96aedf855bc271d0eb215c591d95ea8: Waiting for close lock at 1733237144558Running coprocessor pre-close hooks at 1733237144558Disabling compacts and flushes for region at 1733237144558Disabling writes for close at 1733237144559 (+1 ms)Writing region close event to WAL at 1733237144561 (+2 ms)Running coprocessor post-close hooks at 1733237144586 (+25 ms)Closed at 1733237144593 (+7 ms) 2024-12-03T14:45:44,594 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8. 
2024-12-03T14:45:44,594 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a5d22df9eca2,37335,1733237140672] 2024-12-03T14:45:44,595 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a5d22df9eca2,37335,1733237140672 already deleted, retry=false 2024-12-03T14:45:44,596 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a5d22df9eca2,37335,1733237140672 expired; onlineServers=2 2024-12-03T14:45:44,601 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/.tmp/info/16387cdff8c04356a7cc9aae9104a9be is 153, key is TestHBaseWalOnEC,,1733237142693.f96aedf855bc271d0eb215c591d95ea8./info:regioninfo/1733237143554/Put/seqid=0 2024-12-03T14:45:44,604 WARN [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:44,604 WARN [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:44,609 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_702176451_22 at /127.0.0.1:45494 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:44665:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45494 dst: /127.0.0.1:44665 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
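The repeated "Cannot allocate parity block" warnings above come from writing these files under the RS-3-2-1024k erasure coding policy: each block group needs 3 data cells plus 2 parity cells, so a full group needs at least 5 datanodes, while this minicluster only exposes three (127.0.0.1:39911, 127.0.0.1:44445, 127.0.0.1:44665 in the addStoredBlock lines). Parity cells at indices 3 and 4 therefore have nowhere to land, and the follow-up "Block group <1> failed to write 2 blocks" warning is the expected consequence. The log's own suggestion is to run `hdfs ec -verifyClusterSetup`; a rough programmatic equivalent is sketched below against the public DistributedFileSystem API. This is a minimal sketch only, not part of the test: the class name, the default URI (the test's NameNode happens to be hdfs://localhost:37511) and the default path are illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

import java.net.URI;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    // Hypothetical defaults for illustration; pass your own NameNode URI and path.
    URI nn = URI.create(args.length > 0 ? args[0] : "hdfs://localhost:37511");
    Path dir = new Path(args.length > 1 ? args[1] : "/user/jenkins/test-data");

    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(nn, conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Returns null when the path has no erasure coding policy (plain replication).
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy == null) {
        System.out.println(dir + " uses plain replication (no EC policy set)");
        return;
      }
      int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
      System.out.printf("%s: policy=%s, needs at least %d datanodes for a full block group%n",
          dir, policy.getName(), needed);
    }
  }
}
```

On a cluster with only three datanodes a full RS-3-2 block group can never be placed, which is exactly what the warnings above keep recording.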
2024-12-03T14:45:44,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-03T14:45:44,617 WARN [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:44,617 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/.tmp/info/16387cdff8c04356a7cc9aae9104a9be 2024-12-03T14:45:44,624 INFO [regionserver/a5d22df9eca2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:44,625 INFO [regionserver/a5d22df9eca2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:44,625 INFO [regionserver/a5d22df9eca2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:44,645 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/.tmp/ns/5719fe15cdd64975bee77e1e7e6cd346 is 43, key is default/ns:d/1733237142406/Put/seqid=0 2024-12-03T14:45:44,648 WARN [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:44,648 WARN [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:44,652 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_702176451_22 at /127.0.0.1:45514 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:44665:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45514 dst: /127.0.0.1:44665 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:44,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-03T14:45:44,657 WARN [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:44,657 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/.tmp/ns/5719fe15cdd64975bee77e1e7e6cd346 2024-12-03T14:45:44,686 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/.tmp/table/1a59043ce4c64682a33babf8acf99d46 is 52, key is TestHBaseWalOnEC/table:state/1733237143574/Put/seqid=0 2024-12-03T14:45:44,688 WARN [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:44,689 WARN [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:44,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_702176451_22 at /127.0.0.1:45536 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:44665:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45536 dst: /127.0.0.1:44665 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:44,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:44,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37335-0x100a08b43c40003, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:44,695 INFO [RS:2;a5d22df9eca2:37335 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T14:45:44,695 INFO [RS:2;a5d22df9eca2:37335 {}] regionserver.HRegionServer(1031): Exiting; stopping=a5d22df9eca2,37335,1733237140672; zookeeper connection closed. 2024-12-03T14:45:44,695 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7b3c1da {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7b3c1da 2024-12-03T14:45:44,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-03T14:45:44,697 WARN [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-03T14:45:44,698 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/.tmp/table/1a59043ce4c64682a33babf8acf99d46 2024-12-03T14:45:44,709 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/.tmp/info/16387cdff8c04356a7cc9aae9104a9be as hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/info/16387cdff8c04356a7cc9aae9104a9be 2024-12-03T14:45:44,711 INFO [regionserver/a5d22df9eca2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T14:45:44,711 INFO [regionserver/a5d22df9eca2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T14:45:44,722 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/info/16387cdff8c04356a7cc9aae9104a9be, entries=10, sequenceid=11, filesize=6.5 K 2024-12-03T14:45:44,724 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/.tmp/ns/5719fe15cdd64975bee77e1e7e6cd346 as hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/ns/5719fe15cdd64975bee77e1e7e6cd346 2024-12-03T14:45:44,738 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/ns/5719fe15cdd64975bee77e1e7e6cd346, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T14:45:44,740 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/.tmp/table/1a59043ce4c64682a33babf8acf99d46 as hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/table/1a59043ce4c64682a33babf8acf99d46 2024-12-03T14:45:44,759 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/table/1a59043ce4c64682a33babf8acf99d46, entries=2, sequenceid=11, filesize=5.1 K 2024-12-03T14:45:44,759 DEBUG [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T14:45:44,759 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(976): stopping server a5d22df9eca2,36997,1733237140546; all regions closed. 
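The "Committing .../.tmp/... as .../info/..." and "Added ..." lines above show the general flush pattern: the flushed file is first written under the region's .tmp directory and only afterwards moved into the column family directory, so readers never observe a partially written store file. Below is a minimal, self-contained sketch of that write-to-temp-then-rename idiom using the Hadoop FileSystem API; the paths and file name are invented for illustration, it runs against the local filesystem by default, and it is not the HBase commit code itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // defaults to the local filesystem
    // Hypothetical layout, loosely mirroring <region>/.tmp/<family>/ and <region>/<family>/.
    Path tmpFile   = new Path("/tmp/demo-region/.tmp/info/abc123");
    Path finalFile = new Path("/tmp/demo-region/info/abc123");

    try (FileSystem fs = FileSystem.get(conf)) {
      fs.mkdirs(tmpFile.getParent());
      fs.mkdirs(finalFile.getParent());

      // 1. Write the new file somewhere readers never look.
      try (FSDataOutputStream out = fs.create(tmpFile, true)) {
        out.writeBytes("flushed cells would go here\n");
      }

      // 2. "Commit" it with a rename, which makes it visible in a single step.
      if (!fs.rename(tmpFile, finalFile)) {
        throw new IllegalStateException("commit failed for " + tmpFile);
      }
    }
  }
}
```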
2024-12-03T14:45:44,761 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 201ms, sequenceid=11, compaction requested=false 2024-12-03T14:45:44,761 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T14:45:44,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_1073741828_1018 (size=1298) 2024-12-03T14:45:44,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_1073741828_1018 (size=1298) 2024-12-03T14:45:44,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741828_1018 (size=1298) 2024-12-03T14:45:44,773 DEBUG [RS:0;a5d22df9eca2:36997 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/oldWALs 2024-12-03T14:45:44,773 INFO [RS:0;a5d22df9eca2:36997 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a5d22df9eca2%2C36997%2C1733237140546:(num 1733237141867) 2024-12-03T14:45:44,774 DEBUG [RS:0;a5d22df9eca2:36997 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:44,774 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:44,774 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T14:45:44,774 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.ChoreService(370): Chore service for: regionserver/a5d22df9eca2:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T14:45:44,774 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T14:45:44,774 INFO [regionserver/a5d22df9eca2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T14:45:44,774 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T14:45:44,774 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T14:45:44,774 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T14:45:44,774 INFO [RS:0;a5d22df9eca2:36997 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36997 2024-12-03T14:45:44,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:44,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a5d22df9eca2,36997,1733237140546 2024-12-03T14:45:44,776 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T14:45:44,778 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a5d22df9eca2,36997,1733237140546] 2024-12-03T14:45:44,778 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a5d22df9eca2,36997,1733237140546 already deleted, retry=false 2024-12-03T14:45:44,779 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a5d22df9eca2,36997,1733237140546 expired; onlineServers=1 2024-12-03T14:45:44,792 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T14:45:44,792 INFO [regionserver/a5d22df9eca2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T14:45:44,792 INFO [regionserver/a5d22df9eca2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T14:45:44,793 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T14:45:44,793 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T14:45:44,793 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733237144559Running coprocessor pre-close hooks at 1733237144559Disabling compacts and flushes for region at 1733237144559Disabling writes for close at 1733237144559Obtaining lock to block concurrent updates at 1733237144559Preparing flush snapshotting stores in 1588230740 at 1733237144559Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733237144560 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733237144565 (+5 ms)Flushing 1588230740/info: creating writer at 1733237144565Flushing 1588230740/info: appending metadata at 1733237144598 (+33 ms)Flushing 1588230740/info: closing flushed file at 1733237144598Flushing 1588230740/ns: creating writer at 1733237144630 (+32 ms)Flushing 1588230740/ns: appending metadata at 1733237144644 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733237144644Flushing 1588230740/table: creating writer at 1733237144667 (+23 ms)Flushing 
1588230740/table: appending metadata at 1733237144684 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733237144684Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43885ad2: reopening flushed file at 1733237144708 (+24 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1784a1d6: reopening flushed file at 1733237144722 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@399142a0: reopening flushed file at 1733237144738 (+16 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 201ms, sequenceid=11, compaction requested=false at 1733237144761 (+23 ms)Writing region close event to WAL at 1733237144774 (+13 ms)Running coprocessor post-close hooks at 1733237144793 (+19 ms)Closed at 1733237144793 2024-12-03T14:45:44,793 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T14:45:44,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-03T14:45:44,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-03T14:45:44,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-03T14:45:44,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775644_1025 (size=4787) 2024-12-03T14:45:44,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775645_1025 (size=4787) 2024-12-03T14:45:44,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-03T14:45:44,878 INFO [RS:0;a5d22df9eca2:36997 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T14:45:44,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:44,878 INFO [RS:0;a5d22df9eca2:36997 {}] regionserver.HRegionServer(1031): Exiting; stopping=a5d22df9eca2,36997,1733237140546; zookeeper connection closed. 
2024-12-03T14:45:44,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36997-0x100a08b43c40001, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:44,878 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6ad38dd3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6ad38dd3 2024-12-03T14:45:44,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-03T14:45:44,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775661_1023 (size=51) 2024-12-03T14:45:44,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-03T14:45:44,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_-9223372036854775660_1023 (size=51) 2024-12-03T14:45:44,960 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(976): stopping server a5d22df9eca2,43899,1733237140626; all regions closed. 2024-12-03T14:45:44,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_1073741829_1019 (size=2751) 2024-12-03T14:45:44,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741829_1019 (size=2751) 2024-12-03T14:45:44,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_1073741829_1019 (size=2751) 2024-12-03T14:45:44,969 DEBUG [RS:1;a5d22df9eca2:43899 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/oldWALs 2024-12-03T14:45:44,969 INFO [RS:1;a5d22df9eca2:43899 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a5d22df9eca2%2C43899%2C1733237140626.meta:.meta(num 1733237142261) 2024-12-03T14:45:44,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741827_1017 (size=93) 2024-12-03T14:45:44,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_1073741827_1017 (size=93) 2024-12-03T14:45:44,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_1073741827_1017 (size=93) 2024-12-03T14:45:44,982 DEBUG [RS:1;a5d22df9eca2:43899 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/oldWALs 2024-12-03T14:45:44,982 INFO [RS:1;a5d22df9eca2:43899 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a5d22df9eca2%2C43899%2C1733237140626:(num 1733237141867) 2024-12-03T14:45:44,982 DEBUG [RS:1;a5d22df9eca2:43899 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:44,982 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:44,983 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T14:45:44,983 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.ChoreService(370): Chore service for: regionserver/a5d22df9eca2:0 
had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T14:45:44,983 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T14:45:44,983 INFO [regionserver/a5d22df9eca2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T14:45:44,983 INFO [RS:1;a5d22df9eca2:43899 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43899 2024-12-03T14:45:44,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:44,985 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a5d22df9eca2,43899,1733237140626 2024-12-03T14:45:44,985 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T14:45:44,986 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a5d22df9eca2,43899,1733237140626] 2024-12-03T14:45:44,986 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a5d22df9eca2,43899,1733237140626 already deleted, retry=false 2024-12-03T14:45:44,986 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a5d22df9eca2,43899,1733237140626 expired; onlineServers=0 2024-12-03T14:45:44,986 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a5d22df9eca2,43511,1733237140004' ***** 2024-12-03T14:45:44,989 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T14:45:44,989 INFO [M:0;a5d22df9eca2:43511 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T14:45:44,989 INFO [M:0;a5d22df9eca2:43511 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T14:45:44,989 DEBUG [M:0;a5d22df9eca2:43511 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T14:45:44,989 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
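The shutdown ordering above is driven by ZooKeeper ephemeral nodes: each regionserver holds a znode under /hbase/rs that is tied to its session, so when the server closes its ZooKeeper connection the znode disappears, the master's RegionServerTracker receives the NodeDeleted/NodeChildrenChanged events logged above and processes the expiration, and once onlineServers reaches 0 the master begins stopping itself. The sketch below illustrates only that ephemeral-node mechanism with the plain ZooKeeper client; the quorum address and znode path are placeholders and this is not HBase code.

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

import java.util.concurrent.CountDownLatch;

public class EphemeralExpiryDemo {

  // Open a session and block until it is actually connected.
  static ZooKeeper connect(String quorum) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    return zk;
  }

  public static void main(String[] args) throws Exception {
    String quorum = args.length > 0 ? args[0] : "127.0.0.1:2181"; // placeholder quorum
    String znode = "/demo-ephemeral-rs";                          // stands in for /hbase/rs/<server>

    ZooKeeper master = connect(quorum);  // plays the watching, RegionServerTracker-like role
    ZooKeeper rs = connect(quorum);      // plays the regionserver role

    CountDownLatch expired = new CountDownLatch(1);

    // "Regionserver" registers an ephemeral node tied to its session.
    rs.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // "Master" watches it; NodeDeleted means that session is gone.
    master.exists(znode, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("ephemeral node deleted, processing expiration: " + event.getPath());
        expired.countDown();
      }
    });

    // Closing the regionserver session removes the node and fires the watch.
    rs.close();
    expired.await();
    master.close();
  }
}
```

Closing the second session is all it takes for the watching client to observe the deletion, which is the same signal the RegionServerTracker lines above are reacting to.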
2024-12-03T14:45:44,990 DEBUG [M:0;a5d22df9eca2:43511 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T14:45:44,990 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.large.0-1733237141507 {}] cleaner.HFileCleaner(306): Exit Thread[master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.large.0-1733237141507,5,FailOnTimeoutGroup] 2024-12-03T14:45:44,990 INFO [M:0;a5d22df9eca2:43511 {}] hbase.ChoreService(370): Chore service for: master/a5d22df9eca2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T14:45:44,990 INFO [M:0;a5d22df9eca2:43511 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T14:45:44,990 DEBUG [M:0;a5d22df9eca2:43511 {}] master.HMaster(1795): Stopping service threads 2024-12-03T14:45:44,990 INFO [M:0;a5d22df9eca2:43511 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T14:45:44,990 INFO [M:0;a5d22df9eca2:43511 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T14:45:44,991 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.small.0-1733237141508 {}] cleaner.HFileCleaner(306): Exit Thread[master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.small.0-1733237141508,5,FailOnTimeoutGroup] 2024-12-03T14:45:44,991 INFO [M:0;a5d22df9eca2:43511 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T14:45:44,991 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T14:45:44,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T14:45:44,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:44,996 DEBUG [M:0;a5d22df9eca2:43511 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-03T14:45:44,996 DEBUG [M:0;a5d22df9eca2:43511 {}] master.ActiveMasterManager(353): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-03T14:45:44,997 INFO [M:0;a5d22df9eca2:43511 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/.lastflushedseqids 2024-12-03T14:45:45,007 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:45,007 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-03T14:45:45,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:37380 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37380 dst: /127.0.0.1:39911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:45,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-03T14:45:45,016 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:45,016 INFO [M:0;a5d22df9eca2:43511 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T14:45:45,016 INFO [M:0;a5d22df9eca2:43511 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T14:45:45,016 DEBUG [M:0;a5d22df9eca2:43511 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T14:45:45,016 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:45,016 DEBUG [M:0;a5d22df9eca2:43511 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:45,017 DEBUG [M:0;a5d22df9eca2:43511 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T14:45:45,017 DEBUG [M:0;a5d22df9eca2:43511 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T14:45:45,017 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-03T14:45:45,050 DEBUG [M:0;a5d22df9eca2:43511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b522fd22bd1549e5ac37853ed4a67740 is 82, key is hbase:meta,,1/info:regioninfo/1733237142339/Put/seqid=0 2024-12-03T14:45:45,052 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:45,052 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:45,058 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:34958 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:44445:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34958 dst: /127.0.0.1:44445 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:45,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-03T14:45:45,063 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-03T14:45:45,063 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b522fd22bd1549e5ac37853ed4a67740 2024-12-03T14:45:45,094 DEBUG [M:0;a5d22df9eca2:43511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4b89edb24ab4323bd8bdbdaebd58f6b is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733237143582/Put/seqid=0 2024-12-03T14:45:45,095 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:45,095 INFO [RS:1;a5d22df9eca2:43899 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T14:45:45,095 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43899-0x100a08b43c40002, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:45,095 INFO [RS:1;a5d22df9eca2:43899 {}] regionserver.HRegionServer(1031): Exiting; stopping=a5d22df9eca2,43899,1733237140626; zookeeper connection closed. 2024-12-03T14:45:45,097 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:45,097 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:45,099 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@47cf5b16 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@47cf5b16 2024-12-03T14:45:45,099 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-03T14:45:45,106 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:34974 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:44445:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34974 dst: /127.0.0.1:44445 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:45,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_-9223372036854775552_1037 (size=6439) 2024-12-03T14:45:45,111 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:45,112 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4b89edb24ab4323bd8bdbdaebd58f6b 2024-12-03T14:45:45,138 DEBUG [M:0;a5d22df9eca2:43511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fbd2d99081dd4bb8a13dc9251d01226b is 69, key is a5d22df9eca2,36997,1733237140546/rs:state/1733237141634/Put/seqid=0 2024-12-03T14:45:45,140 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:45,140 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-03T14:45:45,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-604540236_22 at /127.0.0.1:37398 [Receiving block BP-1130478452-172.17.0.2-1733237137316:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:39911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37398 dst: /127.0.0.1:39911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T14:45:45,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-03T14:45:45,150 WARN [M:0;a5d22df9eca2:43511 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-03T14:45:45,150 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fbd2d99081dd4bb8a13dc9251d01226b 2024-12-03T14:45:45,159 DEBUG [M:0;a5d22df9eca2:43511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b522fd22bd1549e5ac37853ed4a67740 as hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b522fd22bd1549e5ac37853ed4a67740 2024-12-03T14:45:45,167 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b522fd22bd1549e5ac37853ed4a67740, entries=8, sequenceid=72, filesize=5.5 K 2024-12-03T14:45:45,169 DEBUG [M:0;a5d22df9eca2:43511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4b89edb24ab4323bd8bdbdaebd58f6b as hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4b89edb24ab4323bd8bdbdaebd58f6b 2024-12-03T14:45:45,177 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4b89edb24ab4323bd8bdbdaebd58f6b, entries=8, sequenceid=72, filesize=6.3 K 2024-12-03T14:45:45,179 DEBUG [M:0;a5d22df9eca2:43511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fbd2d99081dd4bb8a13dc9251d01226b as hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fbd2d99081dd4bb8a13dc9251d01226b 2024-12-03T14:45:45,187 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fbd2d99081dd4bb8a13dc9251d01226b, entries=3, sequenceid=72, filesize=5.2 K 2024-12-03T14:45:45,188 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 171ms, sequenceid=72, compaction requested=false 2024-12-03T14:45:45,189 INFO [M:0;a5d22df9eca2:43511 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:45,189 DEBUG [M:0;a5d22df9eca2:43511 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733237145016Disabling compacts and flushes for region at 1733237145016Disabling writes for close at 1733237145017 (+1 ms)Obtaining lock to block concurrent updates at 1733237145017Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733237145017Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733237145017Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733237145021 (+4 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733237145022 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733237145049 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733237145049Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733237145072 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733237145094 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733237145094Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733237145120 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733237145137 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733237145138 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29374a7f: reopening flushed file at 1733237145157 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@180c07b9: reopening flushed file at 1733237145167 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d426713: reopening flushed file at 1733237145177 (+10 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 171ms, sequenceid=72, compaction requested=false at 1733237145188 (+11 ms)Writing region close event to WAL at 1733237145189 (+1 ms)Closed at 1733237145189 2024-12-03T14:45:45,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741825_1011 (size=32674) 2024-12-03T14:45:45,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39911 is added to blk_1073741825_1011 (size=32674) 2024-12-03T14:45:45,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44665 is added to blk_1073741825_1011 (size=32674) 2024-12-03T14:45:45,194 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T14:45:45,194 INFO [M:0;a5d22df9eca2:43511 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T14:45:45,194 INFO [M:0;a5d22df9eca2:43511 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43511 2024-12-03T14:45:45,195 INFO [M:0;a5d22df9eca2:43511 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T14:45:45,298 INFO [M:0;a5d22df9eca2:43511 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T14:45:45,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:45,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43511-0x100a08b43c40000, quorum=127.0.0.1:55238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:45,304 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:45,307 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T14:45:45,307 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T14:45:45,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T14:45:45,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir/,STOPPED} 2024-12-03T14:45:45,310 WARN [BP-1130478452-172.17.0.2-1733237137316 heartbeating to localhost/127.0.0.1:37511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T14:45:45,310 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T14:45:45,311 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T14:45:45,311 WARN [BP-1130478452-172.17.0.2-1733237137316 heartbeating to localhost/127.0.0.1:37511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1130478452-172.17.0.2-1733237137316 (Datanode Uuid dd709686-6852-47e2-8aa3-3e6c5f534ca8) service to localhost/127.0.0.1:37511 2024-12-03T14:45:45,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data5/current/BP-1130478452-172.17.0.2-1733237137316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:45,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data6/current/BP-1130478452-172.17.0.2-1733237137316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:45,313 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T14:45:45,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:45,317 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T14:45:45,317 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T14:45:45,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T14:45:45,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir/,STOPPED} 2024-12-03T14:45:45,321 WARN [BP-1130478452-172.17.0.2-1733237137316 heartbeating to localhost/127.0.0.1:37511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T14:45:45,321 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T14:45:45,321 WARN [BP-1130478452-172.17.0.2-1733237137316 heartbeating to localhost/127.0.0.1:37511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1130478452-172.17.0.2-1733237137316 (Datanode Uuid 9dffe93c-c4b5-43e6-9399-62f92947c400) service to localhost/127.0.0.1:37511 2024-12-03T14:45:45,321 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T14:45:45,322 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data3/current/BP-1130478452-172.17.0.2-1733237137316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:45,322 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data4/current/BP-1130478452-172.17.0.2-1733237137316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:45,322 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T14:45:45,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:45,325 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T14:45:45,325 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T14:45:45,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T14:45:45,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir/,STOPPED} 2024-12-03T14:45:45,327 WARN [BP-1130478452-172.17.0.2-1733237137316 heartbeating to localhost/127.0.0.1:37511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T14:45:45,327 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
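The three near-identical WARN/ERROR bursts above are the first mini DFS cluster's datanodes (Datanode Uuids dd709686…, 9dffe93c…, 75c034ed…) ending their block-pool services while the harness tears everything down after testReadWrite[0]; the master's RPC server and ZooKeeper connection were already closed at the start of this stretch, and each datanode's Jetty web context is stopped just before its block pool ends. A minimal teardown sketch, assuming the HBaseTestingUtil API named throughout this log; the class name below is illustrative, not taken from TestHBaseWalOnEC:

// Hedged sketch of the per-class teardown that produces the shutdown
// sequence above. HBaseTestingUtil and shutdownMiniCluster() are the
// API referenced in the log; the class name is illustrative only.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class MiniClusterTeardownSketch {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Stops the HMaster and region servers, shuts down the mini DFS
    // cluster (the datanode block-pool services ending above), and
    // stops the MiniZooKeeperCluster before "Minicluster is down".
    UTIL.shutdownMiniCluster();
  }
}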
2024-12-03T14:45:45,327 WARN [BP-1130478452-172.17.0.2-1733237137316 heartbeating to localhost/127.0.0.1:37511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1130478452-172.17.0.2-1733237137316 (Datanode Uuid 75c034ed-3f31-43fa-b73b-adccae630ca5) service to localhost/127.0.0.1:37511 2024-12-03T14:45:45,327 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T14:45:45,328 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data1/current/BP-1130478452-172.17.0.2-1733237137316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:45,328 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/cluster_8fad7c22-bf48-2f41-3971-8ab29681a933/data/data2/current/BP-1130478452-172.17.0.2-1733237137316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:45,328 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T14:45:45,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T14:45:45,337 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T14:45:45,337 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T14:45:45,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T14:45:45,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir/,STOPPED} 2024-12-03T14:45:45,348 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T14:45:45,390 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T14:45:45,397 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 156), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=333 (was 310) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5224 (was 5608) 2024-12-03T14:45:45,405 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=333, ProcessCount=11, AvailableMemoryMB=5223 2024-12-03T14:45:45,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T14:45:45,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.log.dir so I do NOT create it in target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96 2024-12-03T14:45:45,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/22694a39-cc27-49af-2944-cd26834890a8/hadoop.tmp.dir so I do NOT create it in target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96 2024-12-03T14:45:45,406 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475, deleteOnExit=true 2024-12-03T14:45:45,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T14:45:45,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/test.cache.data in system properties and HBase conf 2024-12-03T14:45:45,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T14:45:45,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.log.dir in system properties and HBase conf 2024-12-03T14:45:45,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T14:45:45,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T14:45:45,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T14:45:45,407 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T14:45:45,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T14:45:45,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T14:45:45,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T14:45:45,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T14:45:45,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T14:45:45,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T14:45:45,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T14:45:45,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T14:45:45,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T14:45:45,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/nfs.dump.dir in system properties and HBase conf 2024-12-03T14:45:45,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/java.io.tmpdir in system properties and HBase conf 2024-12-03T14:45:45,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T14:45:45,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T14:45:45,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T14:45:45,486 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:45,492 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T14:45:45,497 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T14:45:45,497 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T14:45:45,498 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T14:45:45,498 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:45,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ad8d9de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.log.dir/,AVAILABLE} 2024-12-03T14:45:45,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e58533{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T14:45:45,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15027254{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/java.io.tmpdir/jetty-localhost-41893-hadoop-hdfs-3_4_1-tests_jar-_-any-10140026885470017731/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T14:45:45,594 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4293887f{HTTP/1.1, (http/1.1)}{localhost:41893} 2024-12-03T14:45:45,594 INFO [Time-limited test {}] server.Server(415): Started @10323ms 2024-12-03T14:45:45,653 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:45,656 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T14:45:45,657 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T14:45:45,657 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T14:45:45,657 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T14:45:45,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c4ebd49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.log.dir/,AVAILABLE} 2024-12-03T14:45:45,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72f96008{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T14:45:45,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18f854cf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/java.io.tmpdir/jetty-localhost-34625-hadoop-hdfs-3_4_1-tests_jar-_-any-8776839062425411456/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:45,747 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@70fdfe33{HTTP/1.1, (http/1.1)}{localhost:34625} 2024-12-03T14:45:45,747 INFO [Time-limited test {}] server.Server(415): Started @10476ms 2024-12-03T14:45:45,749 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T14:45:45,777 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:45,781 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T14:45:45,783 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T14:45:45,783 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T14:45:45,783 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T14:45:45,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b4148d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.log.dir/,AVAILABLE} 2024-12-03T14:45:45,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55cf3a01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T14:45:45,805 WARN [Thread-538 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data1/current/BP-1770701912-172.17.0.2-1733237145441/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:45,805 WARN [Thread-539 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data2/current/BP-1770701912-172.17.0.2-1733237145441/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:45,824 WARN [Thread-517 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T14:45:45,827 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7038ae991e2641d with lease ID 0x25cdbc86b39e0dd7: Processing first storage report for DS-275b8b3c-1cf5-4bc4-88e6-f090729140a4 from datanode DatanodeRegistration(127.0.0.1:35547, datanodeUuid=7aa017bd-052e-4b56-bbb5-9271075ea031, infoPort=37357, infoSecurePort=0, ipcPort=42725, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441) 2024-12-03T14:45:45,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7038ae991e2641d with lease ID 0x25cdbc86b39e0dd7: from storage DS-275b8b3c-1cf5-4bc4-88e6-f090729140a4 node DatanodeRegistration(127.0.0.1:35547, datanodeUuid=7aa017bd-052e-4b56-bbb5-9271075ea031, infoPort=37357, infoSecurePort=0, ipcPort=42725, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T14:45:45,827 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7038ae991e2641d with lease ID 0x25cdbc86b39e0dd7: Processing first storage report for DS-04297f55-a468-4b74-be24-82dfe0625bea from datanode DatanodeRegistration(127.0.0.1:35547, datanodeUuid=7aa017bd-052e-4b56-bbb5-9271075ea031, infoPort=37357, infoSecurePort=0, ipcPort=42725, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441) 2024-12-03T14:45:45,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7038ae991e2641d with lease ID 0x25cdbc86b39e0dd7: from storage DS-04297f55-a468-4b74-be24-82dfe0625bea node DatanodeRegistration(127.0.0.1:35547, datanodeUuid=7aa017bd-052e-4b56-bbb5-9271075ea031, infoPort=37357, infoSecurePort=0, ipcPort=42725, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T14:45:45,879 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46f4cd0a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/java.io.tmpdir/jetty-localhost-35631-hadoop-hdfs-3_4_1-tests_jar-_-any-6414848244958704979/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:45,880 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@700b2317{HTTP/1.1, (http/1.1)}{localhost:35631} 2024-12-03T14:45:45,880 INFO [Time-limited test {}] server.Server(415): Started @10608ms 2024-12-03T14:45:45,881 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T14:45:45,911 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T14:45:45,914 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T14:45:45,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T14:45:45,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T14:45:45,916 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T14:45:45,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44968fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.log.dir/,AVAILABLE} 2024-12-03T14:45:45,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40b03519{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T14:45:45,936 WARN [Thread-573 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data3/current/BP-1770701912-172.17.0.2-1733237145441/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:45,936 WARN [Thread-574 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data4/current/BP-1770701912-172.17.0.2-1733237145441/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:45,951 WARN [Thread-553 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T14:45:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96cde81332d74a7a with lease ID 0x25cdbc86b39e0dd8: Processing first storage report for DS-13927832-bde5-4ef2-8a66-c672468997cd from datanode DatanodeRegistration(127.0.0.1:45965, datanodeUuid=f741b13e-b0e9-4f2b-8805-76233ceb2a1a, infoPort=34903, infoSecurePort=0, ipcPort=45481, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441) 2024-12-03T14:45:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96cde81332d74a7a with lease ID 0x25cdbc86b39e0dd8: from storage DS-13927832-bde5-4ef2-8a66-c672468997cd node DatanodeRegistration(127.0.0.1:45965, datanodeUuid=f741b13e-b0e9-4f2b-8805-76233ceb2a1a, infoPort=34903, infoSecurePort=0, ipcPort=45481, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T14:45:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96cde81332d74a7a with lease ID 0x25cdbc86b39e0dd8: Processing first storage report for DS-fbafd396-605c-46dc-9f5d-5a92eaab9dc2 from datanode DatanodeRegistration(127.0.0.1:45965, datanodeUuid=f741b13e-b0e9-4f2b-8805-76233ceb2a1a, infoPort=34903, infoSecurePort=0, ipcPort=45481, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441) 2024-12-03T14:45:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96cde81332d74a7a with lease ID 0x25cdbc86b39e0dd8: from storage DS-fbafd396-605c-46dc-9f5d-5a92eaab9dc2 node DatanodeRegistration(127.0.0.1:45965, datanodeUuid=f741b13e-b0e9-4f2b-8805-76233ceb2a1a, infoPort=34903, infoSecurePort=0, ipcPort=45481, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T14:45:46,010 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f50f857{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/java.io.tmpdir/jetty-localhost-46551-hadoop-hdfs-3_4_1-tests_jar-_-any-11210534342167319487/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:46,011 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7eeef71e{HTTP/1.1, (http/1.1)}{localhost:46551} 2024-12-03T14:45:46,011 INFO [Time-limited test {}] server.Server(415): Started @10739ms 2024-12-03T14:45:46,012 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
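At this point the second parameter run (testReadWrite[1]) has a fresh mini DFS cluster coming up: the NameNode web UI and three datanode Jetty endpoints have been started, and the datanodes are registering their storages for block pool BP-1770701912-172.17.0.2-1733237145441 with the block manager. The whole sequence is driven by the single startup call whose options were logged earlier (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1). A hedged sketch of that setup, assuming the StartMiniClusterOption builder named in the log; the class name is illustrative:

// Hedged sketch of the setup call behind the startup sequence above.
// The option values mirror the StartMiniClusterOption line logged by
// HBaseTestingUtil(805); the class name is illustrative only.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.BeforeClass;

public class MiniClusterStartupSketch {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster, as in the logged options
        .numRegionServers(3)  // three region servers
        .numDataNodes(3)      // three HDFS datanodes
        .numZkServers(1)      // a single-node MiniZooKeeperCluster
        .build();
    // Starts DFS, ZooKeeper, the master and the region servers in turn,
    // producing the "STARTING DFS" / Jetty / block-report lines above.
    UTIL.startMiniCluster(option);
  }
}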
2024-12-03T14:45:46,071 WARN [Thread-599 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data5/current/BP-1770701912-172.17.0.2-1733237145441/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:46,071 WARN [Thread-600 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data6/current/BP-1770701912-172.17.0.2-1733237145441/current, will proceed with Du for space computation calculation, 2024-12-03T14:45:46,088 WARN [Thread-588 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T14:45:46,091 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69bb4e75eb315b66 with lease ID 0x25cdbc86b39e0dd9: Processing first storage report for DS-636d7131-4386-4c60-bd11-6fde2253d0b7 from datanode DatanodeRegistration(127.0.0.1:43853, datanodeUuid=aefcb56c-3079-4c8c-8591-48e0f889c700, infoPort=42599, infoSecurePort=0, ipcPort=35023, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441) 2024-12-03T14:45:46,091 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69bb4e75eb315b66 with lease ID 0x25cdbc86b39e0dd9: from storage DS-636d7131-4386-4c60-bd11-6fde2253d0b7 node DatanodeRegistration(127.0.0.1:43853, datanodeUuid=aefcb56c-3079-4c8c-8591-48e0f889c700, infoPort=42599, infoSecurePort=0, ipcPort=35023, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T14:45:46,092 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69bb4e75eb315b66 with lease ID 0x25cdbc86b39e0dd9: Processing first storage report for DS-125fa465-f6a7-4adc-af80-5a00cd313026 from datanode DatanodeRegistration(127.0.0.1:43853, datanodeUuid=aefcb56c-3079-4c8c-8591-48e0f889c700, infoPort=42599, infoSecurePort=0, ipcPort=35023, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441) 2024-12-03T14:45:46,092 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69bb4e75eb315b66 with lease ID 0x25cdbc86b39e0dd9: from storage DS-125fa465-f6a7-4adc-af80-5a00cd313026 node DatanodeRegistration(127.0.0.1:43853, datanodeUuid=aefcb56c-3079-4c8c-8591-48e0f889c700, infoPort=42599, infoSecurePort=0, ipcPort=35023, storageInfo=lv=-57;cid=testClusterID;nsid=1674522848;c=1733237145441), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T14:45:46,138 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96 2024-12-03T14:45:46,140 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/zookeeper_0, clientPort=53112, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T14:45:46,141 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53112 2024-12-03T14:45:46,141 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741825_1001 (size=7) 2024-12-03T14:45:46,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741825_1001 (size=7) 2024-12-03T14:45:46,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741825_1001 (size=7) 2024-12-03T14:45:46,158 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909 with version=8 2024-12-03T14:45:46,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37511/user/jenkins/test-data/7ba59e13-a672-227e-fa42-17cdff63d920/hbase-staging 2024-12-03T14:45:46,162 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a5d22df9eca2:0 server-side Connection retries=45 2024-12-03T14:45:46,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,162 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T14:45:46,163 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,163 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T14:45:46,163 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T14:45:46,163 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T14:45:46,164 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45807 2024-12-03T14:45:46,166 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45807 connecting to ZooKeeper ensemble=127.0.0.1:53112 2024-12-03T14:45:46,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:458070x0, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T14:45:46,171 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45807-0x100a08b5e8a0000 connected 2024-12-03T14:45:46,186 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,187 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,189 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:46,190 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909, hbase.cluster.distributed=false 2024-12-03T14:45:46,192 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T14:45:46,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45807 2024-12-03T14:45:46,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45807 2024-12-03T14:45:46,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45807 2024-12-03T14:45:46,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45807 2024-12-03T14:45:46,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45807 2024-12-03T14:45:46,214 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a5d22df9eca2:0 server-side Connection retries=45 2024-12-03T14:45:46,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,215 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T14:45:46,215 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T14:45:46,215 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T14:45:46,215 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T14:45:46,216 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40067 2024-12-03T14:45:46,218 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40067 connecting to ZooKeeper ensemble=127.0.0.1:53112 2024-12-03T14:45:46,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:400670x0, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T14:45:46,229 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:400670x0, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:46,229 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40067-0x100a08b5e8a0001 connected 2024-12-03T14:45:46,230 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T14:45:46,233 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T14:45:46,234 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T14:45:46,236 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T14:45:46,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40067 2024-12-03T14:45:46,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40067 2024-12-03T14:45:46,242 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40067 2024-12-03T14:45:46,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40067 2024-12-03T14:45:46,247 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40067 2024-12-03T14:45:46,269 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a5d22df9eca2:0 server-side Connection retries=45 2024-12-03T14:45:46,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,270 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T14:45:46,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T14:45:46,270 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T14:45:46,270 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T14:45:46,271 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37509 2024-12-03T14:45:46,273 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37509 connecting to ZooKeeper ensemble=127.0.0.1:53112 2024-12-03T14:45:46,274 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,283 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375090x0, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T14:45:46,283 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:375090x0, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:46,284 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T14:45:46,289 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37509-0x100a08b5e8a0002 connected 2024-12-03T14:45:46,294 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T14:45:46,295 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
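The RpcExecutor lines above repeat once per server being brought up: the master bound to 172.17.0.2:45807 and the first two region servers on 40067 and 37509 each instantiate the same set of call queues (default.FPBQ.Fifo, the read/write split priority.RWQ.Fifo, replication.FPBQ.Fifo, metaPriority.FPBQ.Fifo) and then start their handlers. The handlerCount=3 is well below the production default of 30 and comes from the test configuration; mapping it to the standard key below is an assumption, shown only as a sketch:

// Hedged sketch: shrinking the RPC handler pool in a test configuration.
// "hbase.regionserver.handler.count" is the standard key for the default
// call-queue handler count; that it is the source of handlerCount=3 in
// this particular log is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Echo the value back; 30 here is the documented production default.
    System.out.println(conf.getInt("hbase.regionserver.handler.count", 30));
  }
}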
2024-12-03T14:45:46,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T14:45:46,299 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37509 2024-12-03T14:45:46,300 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37509 2024-12-03T14:45:46,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37509 2024-12-03T14:45:46,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37509 2024-12-03T14:45:46,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37509 2024-12-03T14:45:46,325 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a5d22df9eca2:0 server-side Connection retries=45 2024-12-03T14:45:46,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,325 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T14:45:46,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T14:45:46,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T14:45:46,326 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T14:45:46,326 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T14:45:46,329 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34577 2024-12-03T14:45:46,331 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34577 connecting to ZooKeeper ensemble=127.0.0.1:53112 2024-12-03T14:45:46,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,335 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:345770x0, quorum=127.0.0.1:53112, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T14:45:46,348 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:345770x0, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:46,348 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T14:45:46,349 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34577-0x100a08b5e8a0003 connected 2024-12-03T14:45:46,350 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T14:45:46,351 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T14:45:46,353 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T14:45:46,357 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34577 2024-12-03T14:45:46,358 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34577 2024-12-03T14:45:46,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34577 2024-12-03T14:45:46,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34577 2024-12-03T14:45:46,367 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34577 2024-12-03T14:45:46,380 DEBUG [M:0;a5d22df9eca2:45807 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a5d22df9eca2:45807 2024-12-03T14:45:46,380 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:46,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:46,382 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:46,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:46,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:46,385 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:46,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T14:45:46,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,387 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T14:45:46,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T14:45:46,387 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,388 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T14:45:46,388 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a5d22df9eca2,45807,1733237146162 from backup master directory 2024-12-03T14:45:46,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:46,389 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:46,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:46,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:46,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T14:45:46,394 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T14:45:46,394 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:46,403 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/hbase.id] with ID: 46de3b6f-3a4a-4bfc-a730-d80fe981deba 2024-12-03T14:45:46,403 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/.tmp/hbase.id 2024-12-03T14:45:46,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741826_1002 (size=42) 2024-12-03T14:45:46,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741826_1002 (size=42) 2024-12-03T14:45:46,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741826_1002 (size=42) 2024-12-03T14:45:46,430 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/.tmp/hbase.id]:[hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/hbase.id] 2024-12-03T14:45:46,461 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T14:45:46,461 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T14:45:46,463 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
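The ZKUtil(113) entries above show watchers being set on /hbase/running and /hbase/master before those znodes exist. Under the hood this is ZooKeeper's exists() call, which registers a watch even when the node is absent, so the watcher later fires with a NodeCreated event (visible further down once the active master creates /hbase/running). A minimal standalone sketch of that pattern, assuming the quorum address from the log; the class name, 30 s session timeout, and sleep are illustrative only:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ExistsWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum address taken from the log above; the session timeout is an arbitrary choice.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53112", 30_000, event -> { });
        // exists() sets a watch even if the znode is not there yet; the callback
        // receives a NodeCreated event once /hbase/running appears.
        zk.exists("/hbase/running", (WatchedEvent event) ->
            System.out.println("type=" + event.getType() + ", path=" + event.getPath()));
        Thread.sleep(5_000); // give the watch a moment to fire before closing
        zk.close();
      }
    }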
2024-12-03T14:45:46,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,465 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741827_1003 (size=196) 2024-12-03T14:45:46,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741827_1003 (size=196) 2024-12-03T14:45:46,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741827_1003 (size=196) 2024-12-03T14:45:46,490 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T14:45:46,491 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T14:45:46,493 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T14:45:46,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is 
added to blk_1073741828_1004 (size=1189) 2024-12-03T14:45:46,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741828_1004 (size=1189) 2024-12-03T14:45:46,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741828_1004 (size=1189) 2024-12-03T14:45:46,512 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store 2024-12-03T14:45:46,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741829_1005 (size=34) 2024-12-03T14:45:46,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741829_1005 (size=34) 2024-12-03T14:45:46,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741829_1005 (size=34) 2024-12-03T14:45:46,524 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:46,525 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T14:45:46,525 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:46,525 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
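The master:store description printed above is the toString of an HBase TableDescriptor with four column families (info, proc, rs, state). As a rough illustration of how such a family is expressed in the public client API (this is not the MasterRegion code path, and the table name "demo" is made up), the 'info' family with VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory caching and 8 KB blocks could be declared like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes in the log: VERSIONS=3, ROW_INDEX_V1,
        // ROWCOL bloom filter, IN_MEMORY=true, BLOCKSIZE=8192.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo")) // hypothetical table name
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            .build();
        System.out.println(td);
      }
    }

Printing the descriptor produces an attribute dump in the same format as the log entry above.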
2024-12-03T14:45:46,525 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T14:45:46,525 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:46,525 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:46,525 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733237146525Disabling compacts and flushes for region at 1733237146525Disabling writes for close at 1733237146525Writing region close event to WAL at 1733237146525Closed at 1733237146525 2024-12-03T14:45:46,527 WARN [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/.initializing 2024-12-03T14:45:46,527 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/WALs/a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:46,532 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C45807%2C1733237146162, suffix=, logDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/WALs/a5d22df9eca2,45807,1733237146162, archiveDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/oldWALs, maxLogs=10 2024-12-03T14:45:46,533 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a5d22df9eca2%2C45807%2C1733237146162.1733237146533 2024-12-03T14:45:46,547 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/WALs/a5d22df9eca2,45807,1733237146162/a5d22df9eca2%2C45807%2C1733237146162.1733237146533 2024-12-03T14:45:46,549 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42599:42599),(127.0.0.1/127.0.0.1:37357:37357),(127.0.0.1/127.0.0.1:34903:34903)] 2024-12-03T14:45:46,555 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T14:45:46,555 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:46,555 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,555 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,560 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T14:45:46,560 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:46,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:46,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T14:45:46,565 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:46,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T14:45:46,566 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T14:45:46,570 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:46,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T14:45:46,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T14:45:46,574 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:46,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T14:45:46,575 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,576 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,577 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,579 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,579 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,580 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T14:45:46,582 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T14:45:46,585 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T14:45:46,587 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75388975, jitterRate=0.12338326871395111}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T14:45:46,588 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733237146556Initializing all the Stores at 1733237146557 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237146557Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237146558 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237146558Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237146558Cleaning up temporary data from old regions at 1733237146579 (+21 ms)Region opened successfully at 1733237146588 (+9 ms) 2024-12-03T14:45:46,593 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T14:45:46,602 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@724fa7ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a5d22df9eca2/172.17.0.2:0 2024-12-03T14:45:46,603 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T14:45:46,604 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T14:45:46,604 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T14:45:46,604 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T14:45:46,605 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T14:45:46,605 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T14:45:46,605 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T14:45:46,608 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
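The CompactionConfiguration(183) entries above echo the effective compaction settings for each store: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, and a weekly major-compaction period (604800000 ms) with 0.5 jitter. These values correspond to standard HBase configuration keys; the sketch below sets the same values programmatically purely to show where the numbers come from (whether this test overrides any of them is not visible in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // minFilesToCompact / maxFilesToCompact in the log above.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // "ratio 1.200000" and "off-peak ratio 5.000000".
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // "major period 604800000, major jitter 0.500000".
        conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }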
2024-12-03T14:45:46,610 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T14:45:46,610 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T14:45:46,611 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T14:45:46,612 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T14:45:46,613 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T14:45:46,613 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T14:45:46,614 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T14:45:46,615 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T14:45:46,616 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T14:45:46,617 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T14:45:46,619 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T14:45:46,619 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T14:45:46,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:46,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:46,621 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:46,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-03T14:45:46,621 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:46,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,622 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a5d22df9eca2,45807,1733237146162, sessionid=0x100a08b5e8a0000, setting cluster-up flag (Was=false) 2024-12-03T14:45:46,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,623 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,627 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T14:45:46,627 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:46,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,630 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:46,635 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T14:45:46,636 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:46,638 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T14:45:46,641 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T14:45:46,641 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T14:45:46,641 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
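The StochasticLoadBalancer(272) entry above lists the balancer parameters in effect: maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false. For orientation only, these map to the following configuration keys (the values written here just restate what the log already reports):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        conf.setBoolean("hbase.master.loadbalance.bytable", false);
        System.out.println(conf.getInt("hbase.master.balancer.stochastic.maxSteps", -1));
      }
    }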
2024-12-03T14:45:46,642 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a5d22df9eca2,45807,1733237146162 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T14:45:46,643 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a5d22df9eca2:0, corePoolSize=5, maxPoolSize=5 2024-12-03T14:45:46,643 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a5d22df9eca2:0, corePoolSize=5, maxPoolSize=5 2024-12-03T14:45:46,643 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=5, maxPoolSize=5 2024-12-03T14:45:46,643 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=5, maxPoolSize=5 2024-12-03T14:45:46,643 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a5d22df9eca2:0, corePoolSize=10, maxPoolSize=10 2024-12-03T14:45:46,643 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,644 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=2, maxPoolSize=2 2024-12-03T14:45:46,644 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,645 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733237176645 2024-12-03T14:45:46,646 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T14:45:46,646 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T14:45:46,646 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T14:45:46,646 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T14:45:46,646 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T14:45:46,646 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T14:45:46,646 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T14:45:46,646 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,646 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T14:45:46,647 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T14:45:46,647 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T14:45:46,647 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T14:45:46,647 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T14:45:46,647 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T14:45:46,648 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:46,648 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T14:45:46,649 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.large.0-1733237146647,5,FailOnTimeoutGroup] 2024-12-03T14:45:46,652 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.small.0-1733237146649,5,FailOnTimeoutGroup] 2024-12-03T14:45:46,652 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,652 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T14:45:46,652 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,652 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741831_1007 (size=1321) 2024-12-03T14:45:46,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741831_1007 (size=1321) 2024-12-03T14:45:46,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741831_1007 (size=1321) 2024-12-03T14:45:46,660 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T14:45:46,660 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909 2024-12-03T14:45:46,668 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741832_1008 (size=32) 2024-12-03T14:45:46,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741832_1008 (size=32) 2024-12-03T14:45:46,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741832_1008 (size=32) 2024-12-03T14:45:46,669 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(746): ClusterId : 46de3b6f-3a4a-4bfc-a730-d80fe981deba 2024-12-03T14:45:46,669 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T14:45:46,669 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(746): ClusterId : 46de3b6f-3a4a-4bfc-a730-d80fe981deba 2024-12-03T14:45:46,669 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T14:45:46,669 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(746): ClusterId : 46de3b6f-3a4a-4bfc-a730-d80fe981deba 2024-12-03T14:45:46,669 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T14:45:46,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:46,671 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T14:45:46,671 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T14:45:46,671 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T14:45:46,671 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T14:45:46,672 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T14:45:46,672 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T14:45:46,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T14:45:46,673 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T14:45:46,674 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T14:45:46,674 DEBUG [RS:1;a5d22df9eca2:37509 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ca8090, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a5d22df9eca2/172.17.0.2:0 2024-12-03T14:45:46,674 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 
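The three region servers above all report ClusterId 46de3b6f-3a4a-4bfc-a730-d80fe981deba, i.e. the UUID the master wrote to hbase.id earlier in this log. The same identifier, together with the list of live servers, is also exposed through the client API; a sketch assuming the ZooKeeper quorum from the log, noted only for reference since the test itself does not query the cluster this way:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // quorum host from the log
        conf.set("hbase.zookeeper.property.clientPort", "53112"); // quorum port from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("clusterId=" + metrics.getClusterId());
          System.out.println("liveServers=" + metrics.getLiveServerMetrics().keySet());
        }
      }
    }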
2024-12-03T14:45:46,674 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T14:45:46,674 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:46,674 DEBUG [RS:2;a5d22df9eca2:34577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f1decaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a5d22df9eca2/172.17.0.2:0 2024-12-03T14:45:46,674 DEBUG [RS:0;a5d22df9eca2:40067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@641f6d0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a5d22df9eca2/172.17.0.2:0 2024-12-03T14:45:46,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:46,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T14:45:46,677 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T14:45:46,677 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:46,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:46,678 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T14:45:46,680 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T14:45:46,680 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:46,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:46,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T14:45:46,686 DEBUG [RS:1;a5d22df9eca2:37509 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a5d22df9eca2:37509 2024-12-03T14:45:46,686 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T14:45:46,686 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T14:45:46,686 DEBUG [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T14:45:46,687 DEBUG [RS:2;a5d22df9eca2:34577 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;a5d22df9eca2:34577 2024-12-03T14:45:46,687 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T14:45:46,687 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T14:45:46,687 DEBUG [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T14:45:46,687 DEBUG [RS:0;a5d22df9eca2:40067 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a5d22df9eca2:40067 2024-12-03T14:45:46,687 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T14:45:46,687 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T14:45:46,687 DEBUG [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T14:45:46,688 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(2659): reportForDuty to master=a5d22df9eca2,45807,1733237146162 with port=37509, startcode=1733237146269 2024-12-03T14:45:46,688 DEBUG [RS:1;a5d22df9eca2:37509 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T14:45:46,688 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T14:45:46,688 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:46,688 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(2659): reportForDuty to master=a5d22df9eca2,45807,1733237146162 with port=34577, startcode=1733237146324 2024-12-03T14:45:46,688 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(2659): reportForDuty to master=a5d22df9eca2,45807,1733237146162 with port=40067, startcode=1733237146214 2024-12-03T14:45:46,688 DEBUG [RS:2;a5d22df9eca2:34577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T14:45:46,688 DEBUG [RS:0;a5d22df9eca2:40067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T14:45:46,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:46,689 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T14:45:46,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740 2024-12-03T14:45:46,691 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740 2024-12-03T14:45:46,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T14:45:46,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T14:45:46,693 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-03T14:45:46,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T14:45:46,696 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59259, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T14:45:46,696 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46541, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T14:45:46,697 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45807 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a5d22df9eca2,40067,1733237146214 2024-12-03T14:45:46,697 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45807 {}] master.ServerManager(517): Registering regionserver=a5d22df9eca2,40067,1733237146214 2024-12-03T14:45:46,697 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52089, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T14:45:46,699 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45807 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a5d22df9eca2,37509,1733237146269 2024-12-03T14:45:46,699 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45807 {}] master.ServerManager(517): Registering regionserver=a5d22df9eca2,37509,1733237146269 2024-12-03T14:45:46,699 DEBUG [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909 2024-12-03T14:45:46,699 DEBUG [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40139 2024-12-03T14:45:46,699 DEBUG [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T14:45:46,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:46,701 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45807 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:46,701 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45807 {}] master.ServerManager(517): Registering regionserver=a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:46,702 DEBUG [RS:0;a5d22df9eca2:40067 {}] zookeeper.ZKUtil(111): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a5d22df9eca2,40067,1733237146214 2024-12-03T14:45:46,702 WARN [RS:0;a5d22df9eca2:40067 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
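The RpcConnection / ServerRpcConnection lines show SIMPLE authentication with sasl=false, which is expected for this mini-cluster test. In a secured deployment the equivalent client-side settings would typically look like the sketch below (standard Kerberos property values; the principal and keytab path are placeholders for your environment):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecureClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.security.authentication", "kerberos");  // instead of SIMPLE
    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hbase.rpc.protection", "authentication");     // or "privacy" for wire encryption
    UserGroupInformation.setConfiguration(conf);
    // Hypothetical principal/keytab; both are deployment-specific.
    UserGroupInformation.loginUserFromKeytab("client@EXAMPLE.COM",
        "/etc/security/keytabs/client.keytab");
  }
}
```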
2024-12-03T14:45:46,702 INFO [RS:0;a5d22df9eca2:40067 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T14:45:46,702 DEBUG [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,40067,1733237146214 2024-12-03T14:45:46,702 DEBUG [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909 2024-12-03T14:45:46,702 DEBUG [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40139 2024-12-03T14:45:46,702 DEBUG [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T14:45:46,702 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a5d22df9eca2,40067,1733237146214] 2024-12-03T14:45:46,702 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T14:45:46,703 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69168593, jitterRate=0.03069235384464264}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T14:45:46,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733237146670Initializing all the Stores at 1733237146671 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237146671Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237146672 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237146672Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237146672Cleaning up temporary data from old regions at 1733237146693 (+21 ms)Region opened successfully at 1733237146704 (+11 ms) 2024-12-03T14:45:46,705 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T14:45:46,705 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T14:45:46,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T14:45:46,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T14:45:46,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:46,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T14:45:46,706 DEBUG [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909 2024-12-03T14:45:46,706 DEBUG [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40139 2024-12-03T14:45:46,706 DEBUG [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T14:45:46,706 DEBUG [RS:1;a5d22df9eca2:37509 {}] zookeeper.ZKUtil(111): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a5d22df9eca2,37509,1733237146269 2024-12-03T14:45:46,706 WARN [RS:1;a5d22df9eca2:37509 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T14:45:46,706 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a5d22df9eca2,37509,1733237146269] 2024-12-03T14:45:46,706 INFO [RS:1;a5d22df9eca2:37509 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T14:45:46,706 DEBUG [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,37509,1733237146269 2024-12-03T14:45:46,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:46,708 DEBUG [RS:2;a5d22df9eca2:34577 {}] zookeeper.ZKUtil(111): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:46,708 WARN [RS:2;a5d22df9eca2:34577 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
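Each region server above instantiates a WALProvider of type FSHLogProvider, and the later AbstractFSWAL lines report blocksize=256 MB, rollsize=128 MB, maxLogs=32. A hedged sketch of the configuration keys that usually drive those values (key names as of recent HBase versions; double-check against your release):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");                           // FSHLogProvider; "asyncfs" selects AsyncFSWAL
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // WAL block size (256 MB in the log)
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // rollsize = multiplier * blocksize => 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                          // maxLogs=32 in the log
    System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
  }
}
```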
2024-12-03T14:45:46,708 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a5d22df9eca2,34577,1733237146324] 2024-12-03T14:45:46,708 INFO [RS:2;a5d22df9eca2:34577 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T14:45:46,708 DEBUG [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:46,713 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T14:45:46,713 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733237146705Disabling compacts and flushes for region at 1733237146705Disabling writes for close at 1733237146705Writing region close event to WAL at 1733237146713 (+8 ms)Closed at 1733237146713 2024-12-03T14:45:46,715 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T14:45:46,715 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T14:45:46,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T14:45:46,718 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T14:45:46,720 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T14:45:46,720 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T14:45:46,721 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T14:45:46,721 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T14:45:46,723 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T14:45:46,725 INFO [RS:0;a5d22df9eca2:40067 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T14:45:46,725 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
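The PressureAwareCompactionThroughputController line reports a 100 MB/s upper and 50 MB/s lower compaction-throughput bound with a 60 s tuning period. A sketch of overriding the two bounds, assuming the hbase.hstore.compaction.throughput.* keys used by this controller (verify the exact names for your version):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Bytes per second; the log shows 100 MB/s (higher bound) and 50 MB/s (lower bound).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.getLong("hbase.hstore.compaction.throughput.higher.bound", -1));
  }
}
```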
2024-12-03T14:45:46,729 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T14:45:46,730 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T14:45:46,733 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T14:45:46,734 INFO [RS:1;a5d22df9eca2:37509 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T14:45:46,734 INFO [RS:2;a5d22df9eca2:34577 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T14:45:46,734 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,734 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,734 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T14:45:46,734 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,734 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,734 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,734 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,734 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a5d22df9eca2:0, corePoolSize=2, maxPoolSize=2 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting 
executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:46,735 DEBUG [RS:0;a5d22df9eca2:40067 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:46,735 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T14:45:46,735 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T14:45:46,736 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T14:45:46,736 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T14:45:46,737 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,737 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
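The executor.ExecutorService lines register one bounded handler pool per event type (RS_OPEN_REGION, RS_CLOSE_META, RS_SNAPSHOT_OPERATIONS, and so on), each with matching corePoolSize/maxPoolSize. As a plain-JDK illustration of what those two numbers mean (this is not HBase's internal ExecutorService implementation, only the standard semantics it builds on):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolSizeSketch {
  public static void main(String[] args) {
    // core == max (e.g. 3/3 for RS_SNAPSHOT_OPERATIONS) yields a fixed-size pool:
    // at most three handlers run concurrently, further tasks wait in the queue.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    for (int i = 0; i < 6; i++) {
      final int id = i;
      pool.execute(() -> System.out.println("task " + id + " on " + Thread.currentThread().getName()));
    }
    pool.shutdown();
  }
}
```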
2024-12-03T14:45:46,737 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a5d22df9eca2:0, corePoolSize=2, maxPoolSize=2 2024-12-03T14:45:46,737 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a5d22df9eca2:0, corePoolSize=2, maxPoolSize=2 2024-12-03T14:45:46,737 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,737 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a5d22df9eca2:0, 
corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a5d22df9eca2:0, corePoolSize=1, maxPoolSize=1 2024-12-03T14:45:46,738 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:46,738 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:46,738 DEBUG [RS:1;a5d22df9eca2:37509 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:46,738 DEBUG [RS:2;a5d22df9eca2:34577 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0, corePoolSize=3, maxPoolSize=3 2024-12-03T14:45:46,739 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,739 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,739 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,740 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,740 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,740 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,40067,1733237146214-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
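The ChoreService lines schedule the periodic maintenance tasks (CompactionChecker and MemstoreFlusherChore every 1 s, ExecutorStatusChore every 60 s, BrokenStoreFileCleaner every 6 h, and so on). The same ScheduledChore/ChoreService machinery is available as HBase classes; a minimal sketch, assuming the ScheduledChore(name, stopper, period) constructor is accessible in your version:

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can be cancelled cleanly.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // Runs every 1000 ms, mirroring the CompactionChecker/MemstoreFlusherChore periods above.
    ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(chore);
    TimeUnit.SECONDS.sleep(5);
    stopper.stop("done");
    service.shutdown();
  }
}
```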
2024-12-03T14:45:46,745 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,34577,1733237146324-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T14:45:46,745 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,37509,1733237146269-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T14:45:46,758 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T14:45:46,758 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,40067,1733237146214-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,759 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,760 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.Replication(171): a5d22df9eca2,40067,1733237146214 started 2024-12-03T14:45:46,766 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T14:45:46,766 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,37509,1733237146269-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,766 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T14:45:46,766 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,766 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,34577,1733237146324-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
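MemStoreFlusher reports globalMemStoreLimit=880 M with a low-water mark of 836 M (about 95%), and HeapMemoryManager starts with tuneOn=false. Those limits are normally derived from heap-fraction settings; a hedged sketch of the two commonly used keys (values shown are the usual defaults):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap usable by all memstores,
    // and the lower-limit fraction of that bound (the "low mark" seen in the log).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
  }
}
```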
2024-12-03T14:45:46,766 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.Replication(171): a5d22df9eca2,37509,1733237146269 started 2024-12-03T14:45:46,766 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,766 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.Replication(171): a5d22df9eca2,34577,1733237146324 started 2024-12-03T14:45:46,777 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,777 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(1482): Serving as a5d22df9eca2,40067,1733237146214, RpcServer on a5d22df9eca2/172.17.0.2:40067, sessionid=0x100a08b5e8a0001 2024-12-03T14:45:46,777 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T14:45:46,777 DEBUG [RS:0;a5d22df9eca2:40067 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a5d22df9eca2,40067,1733237146214 2024-12-03T14:45:46,777 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,40067,1733237146214' 2024-12-03T14:45:46,777 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T14:45:46,778 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T14:45:46,779 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T14:45:46,779 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T14:45:46,779 DEBUG [RS:0;a5d22df9eca2:40067 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a5d22df9eca2,40067,1733237146214 2024-12-03T14:45:46,779 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,40067,1733237146214' 2024-12-03T14:45:46,779 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T14:45:46,779 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T14:45:46,779 DEBUG [RS:0;a5d22df9eca2:40067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T14:45:46,779 INFO [RS:0;a5d22df9eca2:40067 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T14:45:46,780 INFO [RS:0;a5d22df9eca2:40067 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T14:45:46,780 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
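The flush-table-proc and online-snapshot procedure members started above are what serve client-initiated table flushes and snapshots. A minimal client sketch against a hypothetical table "demo" (connection settings omitted here; see the connection example at the end of this section):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("demo");   // hypothetical table
      admin.flush(table);                            // handled via the flush-table-proc members
      admin.snapshot("demo-snap", table);            // handled via the online-snapshot members
    }
  }
}
```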
2024-12-03T14:45:46,780 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(1482): Serving as a5d22df9eca2,34577,1733237146324, RpcServer on a5d22df9eca2/172.17.0.2:34577, sessionid=0x100a08b5e8a0003 2024-12-03T14:45:46,780 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T14:45:46,780 DEBUG [RS:2;a5d22df9eca2:34577 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:46,780 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,34577,1733237146324' 2024-12-03T14:45:46,780 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T14:45:46,780 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:46,781 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(1482): Serving as a5d22df9eca2,37509,1733237146269, RpcServer on a5d22df9eca2/172.17.0.2:37509, sessionid=0x100a08b5e8a0002 2024-12-03T14:45:46,781 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T14:45:46,781 DEBUG [RS:1;a5d22df9eca2:37509 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a5d22df9eca2,37509,1733237146269 2024-12-03T14:45:46,781 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,37509,1733237146269' 2024-12-03T14:45:46,781 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T14:45:46,781 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T14:45:46,781 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T14:45:46,781 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T14:45:46,781 DEBUG [RS:2;a5d22df9eca2:34577 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:46,781 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T14:45:46,781 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,34577,1733237146324' 2024-12-03T14:45:46,781 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T14:45:46,782 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T14:45:46,782 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T14:45:46,782 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T14:45:46,782 DEBUG [RS:1;a5d22df9eca2:37509 {}] 
snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a5d22df9eca2,37509,1733237146269 2024-12-03T14:45:46,782 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a5d22df9eca2,37509,1733237146269' 2024-12-03T14:45:46,782 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T14:45:46,782 DEBUG [RS:2;a5d22df9eca2:34577 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T14:45:46,782 INFO [RS:2;a5d22df9eca2:34577 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T14:45:46,782 INFO [RS:2;a5d22df9eca2:34577 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T14:45:46,782 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T14:45:46,783 DEBUG [RS:1;a5d22df9eca2:37509 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T14:45:46,783 INFO [RS:1;a5d22df9eca2:37509 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T14:45:46,783 INFO [RS:1;a5d22df9eca2:37509 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T14:45:46,873 WARN [a5d22df9eca2:45807 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T14:45:46,882 INFO [RS:0;a5d22df9eca2:40067 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C40067%2C1733237146214, suffix=, logDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,40067,1733237146214, archiveDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/oldWALs, maxLogs=32 2024-12-03T14:45:46,884 INFO [RS:0;a5d22df9eca2:40067 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a5d22df9eca2%2C40067%2C1733237146214.1733237146884 2024-12-03T14:45:46,885 INFO [RS:2;a5d22df9eca2:34577 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C34577%2C1733237146324, suffix=, logDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,34577,1733237146324, archiveDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/oldWALs, maxLogs=32 2024-12-03T14:45:46,885 INFO [RS:1;a5d22df9eca2:37509 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C37509%2C1733237146269, suffix=, logDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,37509,1733237146269, archiveDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/oldWALs, maxLogs=32 2024-12-03T14:45:46,886 INFO [RS:1;a5d22df9eca2:37509 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a5d22df9eca2%2C37509%2C1733237146269.1733237146886 2024-12-03T14:45:46,886 INFO [RS:2;a5d22df9eca2:34577 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a5d22df9eca2%2C34577%2C1733237146324.1733237146886 2024-12-03T14:45:46,912 INFO [RS:0;a5d22df9eca2:40067 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,40067,1733237146214/a5d22df9eca2%2C40067%2C1733237146214.1733237146884 2024-12-03T14:45:46,912 INFO [RS:1;a5d22df9eca2:37509 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,37509,1733237146269/a5d22df9eca2%2C37509%2C1733237146269.1733237146886 2024-12-03T14:45:46,914 DEBUG [RS:0;a5d22df9eca2:40067 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42599:42599),(127.0.0.1/127.0.0.1:37357:37357),(127.0.0.1/127.0.0.1:34903:34903)] 2024-12-03T14:45:46,914 INFO [RS:2;a5d22df9eca2:34577 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,34577,1733237146324/a5d22df9eca2%2C34577%2C1733237146324.1733237146886 2024-12-03T14:45:46,915 DEBUG [RS:1;a5d22df9eca2:37509 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37357:37357),(127.0.0.1/127.0.0.1:34903:34903),(127.0.0.1/127.0.0.1:42599:42599)] 2024-12-03T14:45:46,922 DEBUG [RS:2;a5d22df9eca2:34577 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34903:34903),(127.0.0.1/127.0.0.1:42599:42599),(127.0.0.1/127.0.0.1:37357:37357)] 2024-12-03T14:45:47,123 DEBUG [a5d22df9eca2:45807 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-03T14:45:47,123 DEBUG [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(204): Hosts are {a5d22df9eca2=0} racks are {/default-rack=0} 2024-12-03T14:45:47,127 DEBUG [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T14:45:47,127 DEBUG [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T14:45:47,127 DEBUG [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T14:45:47,128 DEBUG [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T14:45:47,128 DEBUG [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T14:45:47,128 DEBUG [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T14:45:47,128 INFO [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T14:45:47,128 INFO [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T14:45:47,128 INFO [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T14:45:47,128 DEBUG [a5d22df9eca2:45807 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T14:45:47,128 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:47,131 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a5d22df9eca2,34577,1733237146324, state=OPENING 2024-12-03T14:45:47,132 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T14:45:47,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:47,133 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:47,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:47,134 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:47,135 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:47,134 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:47,135 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:47,135 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:47,135 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T14:45:47,135 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a5d22df9eca2,34577,1733237146324}] 2024-12-03T14:45:47,293 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T14:45:47,298 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49697, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T14:45:47,313 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T14:45:47,314 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T14:45:47,318 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a5d22df9eca2%2C34577%2C1733237146324.meta, suffix=.meta, logDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,34577,1733237146324, archiveDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/oldWALs, maxLogs=32 2024-12-03T14:45:47,319 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a5d22df9eca2%2C34577%2C1733237146324.meta.1733237147319.meta 2024-12-03T14:45:47,375 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/WALs/a5d22df9eca2,34577,1733237146324/a5d22df9eca2%2C34577%2C1733237146324.meta.1733237147319.meta 2024-12-03T14:45:47,380 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37357:37357),(127.0.0.1/127.0.0.1:34903:34903),(127.0.0.1/127.0.0.1:42599:42599)] 2024-12-03T14:45:47,394 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T14:45:47,395 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T14:45:47,395 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T14:45:47,395 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T14:45:47,395 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T14:45:47,395 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:47,396 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T14:45:47,396 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T14:45:47,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T14:45:47,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T14:45:47,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:47,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:47,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T14:45:47,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T14:45:47,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:47,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:47,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T14:45:47,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T14:45:47,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:47,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:47,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T14:45:47,412 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T14:45:47,412 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:47,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T14:45:47,413 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T14:45:47,414 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740 2024-12-03T14:45:47,416 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740 2024-12-03T14:45:47,418 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T14:45:47,419 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T14:45:47,419 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
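The store-open lines for hbase:meta use ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching, and an 8 KB block size for the info/ns/table families (64 KB for rep_barrier). The same options are available for user tables through ColumnFamilyDescriptorBuilder; a sketch for a hypothetical family "info":

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilySketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // encoding=ROW_INDEX_V1 in the log
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
        .build();
    System.out.println(cf);
  }
}
```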
2024-12-03T14:45:47,422 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T14:45:47,424 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60539194, jitterRate=-0.09789571166038513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T14:45:47,424 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T14:45:47,425 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733237147396Writing region info on filesystem at 1733237147396Initializing all the Stores at 1733237147398 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237147398Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237147401 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237147401Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733237147401Cleaning up temporary data from old regions at 1733237147419 (+18 ms)Running coprocessor post-open hooks at 1733237147424 (+5 ms)Region opened successfully at 1733237147424 2024-12-03T14:45:47,427 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733237147291 2024-12-03T14:45:47,431 DEBUG [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T14:45:47,432 INFO [RS_OPEN_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T14:45:47,433 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:47,435 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a5d22df9eca2,34577,1733237146324, state=OPEN 2024-12-03T14:45:47,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T14:45:47,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T14:45:47,437 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:47,437 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:47,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T14:45:47,438 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:47,438 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T14:45:47,438 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T14:45:47,439 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:47,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T14:45:47,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a5d22df9eca2,34577,1733237146324 in 304 msec 2024-12-03T14:45:47,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T14:45:47,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 731 msec 2024-12-03T14:45:47,453 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T14:45:47,453 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T14:45:47,454 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T14:45:47,454 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=a5d22df9eca2,34577,1733237146324, seqNum=-1] 2024-12-03T14:45:47,455 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T14:45:47,456 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48091, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T14:45:47,469 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 827 msec 2024-12-03T14:45:47,469 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733237147469, completionTime=-1 2024-12-03T14:45:47,469 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-03T14:45:47,469 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T14:45:47,472 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-03T14:45:47,472 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733237207472 2024-12-03T14:45:47,472 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733237267472 2024-12-03T14:45:47,472 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T14:45:47,472 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-03T14:45:47,473 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,45807,1733237146162-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:47,473 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,45807,1733237146162-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:47,473 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,45807,1733237146162-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:47,473 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a5d22df9eca2:45807, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:47,473 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T14:45:47,473 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-03T14:45:47,476 DEBUG [master/a5d22df9eca2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T14:45:47,478 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.084sec 2024-12-03T14:45:47,479 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T14:45:47,479 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T14:45:47,479 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T14:45:47,479 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T14:45:47,479 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T14:45:47,479 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,45807,1733237146162-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T14:45:47,479 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,45807,1733237146162-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T14:45:47,481 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T14:45:47,482 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T14:45:47,482 INFO [master/a5d22df9eca2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a5d22df9eca2,45807,1733237146162-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T14:45:47,570 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f3562bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T14:45:47,570 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a5d22df9eca2,45807,-1 for getting cluster id 2024-12-03T14:45:47,570 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T14:45:47,572 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46de3b6f-3a4a-4bfc-a730-d80fe981deba' 2024-12-03T14:45:47,572 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T14:45:47,573 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46de3b6f-3a4a-4bfc-a730-d80fe981deba" 2024-12-03T14:45:47,573 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@296b64f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T14:45:47,573 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a5d22df9eca2,45807,-1] 2024-12-03T14:45:47,573 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T14:45:47,574 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:47,575 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T14:45:47,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61e12713, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T14:45:47,577 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T14:45:47,578 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a5d22df9eca2,34577,1733237146324, seqNum=-1] 2024-12-03T14:45:47,579 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T14:45:47,583 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38816, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T14:45:47,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:47,587 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T14:45:47,589 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:47,589 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3c77cd1f 2024-12-03T14:45:47,590 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T14:45:47,595 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T14:45:47,598 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T14:45:47,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-03T14:45:47,603 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T14:45:47,603 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:47,603 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-03T14:45:47,606 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T14:45:47,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T14:45:47,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741837_1013 (size=392) 2024-12-03T14:45:47,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741837_1013 (size=392) 2024-12-03T14:45:47,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741837_1013 (size=392) 2024-12-03T14:45:47,628 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bde0e1116d914ce50608965220a717fb, NAME => 'TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909 2024-12-03T14:45:47,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741838_1014 (size=51) 2024-12-03T14:45:47,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741838_1014 (size=51) 2024-12-03T14:45:47,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741838_1014 (size=51) 2024-12-03T14:45:47,645 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:47,646 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing bde0e1116d914ce50608965220a717fb, disabling compactions & flushes 2024-12-03T14:45:47,646 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:47,646 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:47,646 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. after waiting 0 ms 2024-12-03T14:45:47,646 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:47,646 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:47,646 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for bde0e1116d914ce50608965220a717fb: Waiting for close lock at 1733237147646Disabling compacts and flushes for region at 1733237147646Disabling writes for close at 1733237147646Writing region close event to WAL at 1733237147646Closed at 1733237147646 2024-12-03T14:45:47,648 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T14:45:47,649 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733237147648"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733237147648"}]},"ts":"1733237147648"} 2024-12-03T14:45:47,652 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T14:45:47,654 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T14:45:47,654 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733237147654"}]},"ts":"1733237147654"} 2024-12-03T14:45:47,657 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-03T14:45:47,658 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {a5d22df9eca2=0} racks are {/default-rack=0} 2024-12-03T14:45:47,659 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-03T14:45:47,659 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-03T14:45:47,659 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-03T14:45:47,659 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-03T14:45:47,659 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-03T14:45:47,659 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-03T14:45:47,659 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-03T14:45:47,659 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-03T14:45:47,659 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-03T14:45:47,659 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-03T14:45:47,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bde0e1116d914ce50608965220a717fb, ASSIGN}] 2024-12-03T14:45:47,662 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bde0e1116d914ce50608965220a717fb, ASSIGN 2024-12-03T14:45:47,664 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bde0e1116d914ce50608965220a717fb, ASSIGN; state=OFFLINE, location=a5d22df9eca2,34577,1733237146324; forceNewPlan=false, retain=false 2024-12-03T14:45:47,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T14:45:47,798 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T14:45:47,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T14:45:47,815 INFO [a5d22df9eca2:45807 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
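The CreateTableProcedure and ASSIGN steps above stem from the client request logged earlier ("create 'TestHBaseWalOnEC'", REGION_REPLICATION => '1', one 'cf' family with VERSIONS => '1'). A minimal client-side sketch of an equivalent request through the public Admin API is shown below; the class and method names are illustrative wrappers, and only the table/family attributes come from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // Creates a table matching the logged descriptor: REGION_REPLICATION=1, family 'cf' with max 1 version.
  static void createTestTable(Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build())
              .build());
    }
  }
}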
2024-12-03T14:45:47,815 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bde0e1116d914ce50608965220a717fb, regionState=OPENING, regionLocation=a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:47,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bde0e1116d914ce50608965220a717fb, ASSIGN because future has completed 2024-12-03T14:45:47,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bde0e1116d914ce50608965220a717fb, server=a5d22df9eca2,34577,1733237146324}] 2024-12-03T14:45:47,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T14:45:47,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T14:45:47,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T14:45:47,978 INFO [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:47,978 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bde0e1116d914ce50608965220a717fb, NAME => 'TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb.', STARTKEY => '', ENDKEY => ''} 2024-12-03T14:45:47,979 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,979 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T14:45:47,979 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,979 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,981 INFO [StoreOpener-bde0e1116d914ce50608965220a717fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,983 INFO [StoreOpener-bde0e1116d914ce50608965220a717fb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bde0e1116d914ce50608965220a717fb columnFamilyName cf 2024-12-03T14:45:47,983 DEBUG [StoreOpener-bde0e1116d914ce50608965220a717fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T14:45:47,985 INFO [StoreOpener-bde0e1116d914ce50608965220a717fb-1 {}] regionserver.HStore(327): Store=bde0e1116d914ce50608965220a717fb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T14:45:47,985 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,987 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/default/TestHBaseWalOnEC/bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,987 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/default/TestHBaseWalOnEC/bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,988 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,988 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,990 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,993 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/default/TestHBaseWalOnEC/bde0e1116d914ce50608965220a717fb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T14:45:47,994 INFO [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bde0e1116d914ce50608965220a717fb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61715423, jitterRate=-0.0803685337305069}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T14:45:47,994 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:47,995 DEBUG 
[RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bde0e1116d914ce50608965220a717fb: Running coprocessor pre-open hook at 1733237147979Writing region info on filesystem at 1733237147979Initializing all the Stores at 1733237147980 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733237147980Cleaning up temporary data from old regions at 1733237147988 (+8 ms)Running coprocessor post-open hooks at 1733237147994 (+6 ms)Region opened successfully at 1733237147995 (+1 ms) 2024-12-03T14:45:47,997 INFO [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb., pid=6, masterSystemTime=1733237147973 2024-12-03T14:45:48,000 DEBUG [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:48,000 INFO [RS_OPEN_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:48,001 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bde0e1116d914ce50608965220a717fb, regionState=OPEN, openSeqNum=2, regionLocation=a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:48,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bde0e1116d914ce50608965220a717fb, server=a5d22df9eca2,34577,1733237146324 because future has completed 2024-12-03T14:45:48,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T14:45:48,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bde0e1116d914ce50608965220a717fb, server=a5d22df9eca2,34577,1733237146324 in 188 msec 2024-12-03T14:45:48,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T14:45:48,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bde0e1116d914ce50608965220a717fb, ASSIGN in 353 msec 2024-12-03T14:45:48,018 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T14:45:48,018 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733237148018"}]},"ts":"1733237148018"} 2024-12-03T14:45:48,021 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-03T14:45:48,023 INFO 
[PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T14:45:48,027 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 425 msec 2024-12-03T14:45:48,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T14:45:48,241 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-03T14:45:48,241 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-03T14:45:48,241 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T14:45:48,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-03T14:45:48,247 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T14:45:48,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-03T14:45:48,254 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb., hostname=a5d22df9eca2,34577,1733237146324, seqNum=2] 2024-12-03T14:45:48,267 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-03T14:45:48,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-03T14:45:48,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T14:45:48,271 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-03T14:45:48,273 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T14:45:48,274 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T14:45:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T14:45:48,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34577 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-03T14:45:48,436 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:48,436 INFO [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing bde0e1116d914ce50608965220a717fb 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-03T14:45:48,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/default/TestHBaseWalOnEC/bde0e1116d914ce50608965220a717fb/.tmp/cf/57006af5248d43d89dd76e9f9b261363 is 36, key is row/cf:cq/1733237148258/Put/seqid=0 2024-12-03T14:45:48,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741839_1015 (size=4787) 2024-12-03T14:45:48,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741839_1015 (size=4787) 2024-12-03T14:45:48,480 INFO [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/default/TestHBaseWalOnEC/bde0e1116d914ce50608965220a717fb/.tmp/cf/57006af5248d43d89dd76e9f9b261363 2024-12-03T14:45:48,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741839_1015 (size=4787) 2024-12-03T14:45:48,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/default/TestHBaseWalOnEC/bde0e1116d914ce50608965220a717fb/.tmp/cf/57006af5248d43d89dd76e9f9b261363 as hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/default/TestHBaseWalOnEC/bde0e1116d914ce50608965220a717fb/cf/57006af5248d43d89dd76e9f9b261363 2024-12-03T14:45:48,504 INFO [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/default/TestHBaseWalOnEC/bde0e1116d914ce50608965220a717fb/cf/57006af5248d43d89dd76e9f9b261363, entries=1, sequenceid=5, filesize=4.7 K 2024-12-03T14:45:48,505 INFO [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for bde0e1116d914ce50608965220a717fb in 69ms, sequenceid=5, compaction requested=false 2024-12-03T14:45:48,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for bde0e1116d914ce50608965220a717fb: 2024-12-03T14:45:48,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 
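The flush just completed covers a single 32-byte Put (cell key row/cf:cq, written to the .tmp HFile and then committed under cf/). An illustrative client-side equivalent of that write-then-flush sequence follows; the connection handling and the cell value are placeholders rather than code from the test itself.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  // Writes one cell (row/cf:cq) and asks the master to flush the table,
  // which corresponds to the FlushTableProcedure (pid=7) and FlushRegionProcedure (pid=8) above.
  static void putAndFlush(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(tn)) {
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"))); // value is a placeholder
    }
    try (Admin admin = conn.getAdmin()) {
      admin.flush(tn);
    }
  }
}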
2024-12-03T14:45:48,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a5d22df9eca2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-03T14:45:48,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-03T14:45:48,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-03T14:45:48,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 235 msec 2024-12-03T14:45:48,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 251 msec 2024-12-03T14:45:48,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45807 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T14:45:48,590 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-03T14:45:48,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T14:45:48,595 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T14:45:48,595 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:48,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:48,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:48,595 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T14:45:48,595 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T14:45:48,595 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1387451949, stopped=false 2024-12-03T14:45:48,595 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a5d22df9eca2,45807,1733237146162 2024-12-03T14:45:48,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:48,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:48,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:48,597 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T14:45:48,597 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:48,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, 
quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:48,597 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T14:45:48,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:48,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:48,597 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T14:45:48,598 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:48,598 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:48,598 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:48,598 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:48,598 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a5d22df9eca2,40067,1733237146214' ***** 2024-12-03T14:45:48,598 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:48,598 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T14:45:48,598 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T14:45:48,598 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a5d22df9eca2,37509,1733237146269' ***** 2024-12-03T14:45:48,598 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T14:45:48,598 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a5d22df9eca2,34577,1733237146324' ***** 2024-12-03T14:45:48,598 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T14:45:48,598 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T14:45:48,599 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T14:45:48,599 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T14:45:48,599 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T14:45:48,599 INFO [RS:2;a5d22df9eca2:34577 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-03T14:45:48,599 INFO [RS:2;a5d22df9eca2:34577 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T14:45:48,599 INFO [RS:1;a5d22df9eca2:37509 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T14:45:48,599 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T14:45:48,599 INFO [RS:1;a5d22df9eca2:37509 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T14:45:48,599 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(3091): Received CLOSE for bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:48,599 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(959): stopping server a5d22df9eca2,37509,1733237146269 2024-12-03T14:45:48,599 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T14:45:48,599 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T14:45:48,599 INFO [RS:1;a5d22df9eca2:37509 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a5d22df9eca2:37509. 2024-12-03T14:45:48,599 INFO [RS:0;a5d22df9eca2:40067 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T14:45:48,599 INFO [RS:0;a5d22df9eca2:40067 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T14:45:48,599 DEBUG [RS:1;a5d22df9eca2:37509 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:48,599 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(959): stopping server a5d22df9eca2,40067,1733237146214 2024-12-03T14:45:48,599 DEBUG [RS:1;a5d22df9eca2:37509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:48,599 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T14:45:48,599 INFO [RS:0;a5d22df9eca2:40067 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a5d22df9eca2:40067. 
2024-12-03T14:45:48,599 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(976): stopping server a5d22df9eca2,37509,1733237146269; all regions closed. 2024-12-03T14:45:48,599 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(959): stopping server a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:48,600 DEBUG [RS:0;a5d22df9eca2:40067 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:48,600 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bde0e1116d914ce50608965220a717fb, disabling compactions & flushes 2024-12-03T14:45:48,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,600 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T14:45:48,600 DEBUG [RS:0;a5d22df9eca2:40067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:48,600 INFO [RS:2;a5d22df9eca2:34577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;a5d22df9eca2:34577. 2024-12-03T14:45:48,600 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(976): stopping server a5d22df9eca2,40067,1733237146214; all regions closed. 
2024-12-03T14:45:48,600 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,600 DEBUG [RS:2;a5d22df9eca2:34577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T14:45:48,601 DEBUG [RS:2;a5d22df9eca2:34577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:48,601 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T14:45:48,601 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T14:45:48,601 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,601 INFO [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:48,601 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T14:45:48,601 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T14:45:48,601 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:48,601 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. after waiting 0 ms 2024-12-03T14:45:48,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,601 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 
2024-12-03T14:45:48,601 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,602 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,602 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,602 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,602 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,602 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,603 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T14:45:48,603 DEBUG [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(1325): Online Regions={bde0e1116d914ce50608965220a717fb=TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb., 1588230740=hbase:meta,,1.1588230740} 2024-12-03T14:45:48,603 DEBUG [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, bde0e1116d914ce50608965220a717fb 2024-12-03T14:45:48,603 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T14:45:48,603 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T14:45:48,604 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T14:45:48,604 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T14:45:48,604 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T14:45:48,604 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-03T14:45:48,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741833_1009 (size=93) 2024-12-03T14:45:48,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741833_1009 (size=93) 2024-12-03T14:45:48,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741833_1009 (size=93) 2024-12-03T14:45:48,609 DEBUG [RS:0;a5d22df9eca2:40067 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/oldWALs 2024-12-03T14:45:48,609 INFO [RS:0;a5d22df9eca2:40067 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a5d22df9eca2%2C40067%2C1733237146214:(num 1733237146884) 2024-12-03T14:45:48,609 DEBUG [RS:0;a5d22df9eca2:40067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:48,609 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:48,609 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T14:45:48,610 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.ChoreService(370): Chore service for: regionserver/a5d22df9eca2:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, 
ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T14:45:48,610 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T14:45:48,610 INFO [regionserver/a5d22df9eca2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T14:45:48,610 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T14:45:48,610 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T14:45:48,610 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T14:45:48,610 INFO [RS:0;a5d22df9eca2:40067 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40067 2024-12-03T14:45:48,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a5d22df9eca2,40067,1733237146214 2024-12-03T14:45:48,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:48,612 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T14:45:48,613 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a5d22df9eca2,40067,1733237146214] 2024-12-03T14:45:48,615 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a5d22df9eca2,40067,1733237146214 already deleted, retry=false 2024-12-03T14:45:48,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741834_1010 (size=93) 2024-12-03T14:45:48,615 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a5d22df9eca2,40067,1733237146214 expired; onlineServers=2 2024-12-03T14:45:48,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741834_1010 (size=93) 2024-12-03T14:45:48,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741834_1010 (size=93) 2024-12-03T14:45:48,618 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/default/TestHBaseWalOnEC/bde0e1116d914ce50608965220a717fb/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-03T14:45:48,619 INFO [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 
2024-12-03T14:45:48,619 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bde0e1116d914ce50608965220a717fb: Waiting for close lock at 1733237148600Running coprocessor pre-close hooks at 1733237148600Disabling compacts and flushes for region at 1733237148600Disabling writes for close at 1733237148601 (+1 ms)Writing region close event to WAL at 1733237148610 (+9 ms)Running coprocessor post-close hooks at 1733237148619 (+9 ms)Closed at 1733237148619 2024-12-03T14:45:48,619 DEBUG [RS_CLOSE_REGION-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb. 2024-12-03T14:45:48,629 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/.tmp/info/bdb01a00834b4a37a3c721455cba552a is 153, key is TestHBaseWalOnEC,,1733237147598.bde0e1116d914ce50608965220a717fb./info:regioninfo/1733237148001/Put/seqid=0 2024-12-03T14:45:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741840_1016 (size=6637) 2024-12-03T14:45:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741840_1016 (size=6637) 2024-12-03T14:45:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741840_1016 (size=6637) 2024-12-03T14:45:48,637 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/.tmp/info/bdb01a00834b4a37a3c721455cba552a 2024-12-03T14:45:48,644 INFO [regionserver/a5d22df9eca2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:48,650 INFO [regionserver/a5d22df9eca2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:48,650 INFO [regionserver/a5d22df9eca2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:48,659 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/.tmp/ns/cfabea566cb843d98b68409ac8ce343c is 43, key is default/ns:d/1733237147457/Put/seqid=0 2024-12-03T14:45:48,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741841_1017 (size=5153) 2024-12-03T14:45:48,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741841_1017 (size=5153) 2024-12-03T14:45:48,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741841_1017 (size=5153) 2024-12-03T14:45:48,667 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/.tmp/ns/cfabea566cb843d98b68409ac8ce343c 2024-12-03T14:45:48,692 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/.tmp/table/cf7d4a42a7c04b369fed3f008d08e570 is 52, key is TestHBaseWalOnEC/table:state/1733237148018/Put/seqid=0 2024-12-03T14:45:48,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741842_1018 (size=5249) 2024-12-03T14:45:48,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741842_1018 (size=5249) 2024-12-03T14:45:48,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741842_1018 (size=5249) 2024-12-03T14:45:48,700 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/.tmp/table/cf7d4a42a7c04b369fed3f008d08e570 2024-12-03T14:45:48,708 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/.tmp/info/bdb01a00834b4a37a3c721455cba552a as hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/info/bdb01a00834b4a37a3c721455cba552a 2024-12-03T14:45:48,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:48,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40067-0x100a08b5e8a0001, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:48,716 INFO [RS:0;a5d22df9eca2:40067 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T14:45:48,716 INFO [RS:0;a5d22df9eca2:40067 {}] regionserver.HRegionServer(1031): Exiting; stopping=a5d22df9eca2,40067,1733237146214; zookeeper connection closed. 
2024-12-03T14:45:48,716 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e809068 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e809068 2024-12-03T14:45:48,717 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/info/bdb01a00834b4a37a3c721455cba552a, entries=10, sequenceid=11, filesize=6.5 K 2024-12-03T14:45:48,720 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/.tmp/ns/cfabea566cb843d98b68409ac8ce343c as hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/ns/cfabea566cb843d98b68409ac8ce343c 2024-12-03T14:45:48,728 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/ns/cfabea566cb843d98b68409ac8ce343c, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T14:45:48,730 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/.tmp/table/cf7d4a42a7c04b369fed3f008d08e570 as hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/table/cf7d4a42a7c04b369fed3f008d08e570 2024-12-03T14:45:48,738 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/table/cf7d4a42a7c04b369fed3f008d08e570, entries=2, sequenceid=11, filesize=5.1 K 2024-12-03T14:45:48,740 INFO [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false 2024-12-03T14:45:48,745 INFO [regionserver/a5d22df9eca2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T14:45:48,745 INFO [regionserver/a5d22df9eca2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T14:45:48,746 INFO [regionserver/a5d22df9eca2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T14:45:48,746 INFO [regionserver/a5d22df9eca2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T14:45:48,746 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T14:45:48,747 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T14:45:48,747 INFO 
[RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T14:45:48,747 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733237148603Running coprocessor pre-close hooks at 1733237148603Disabling compacts and flushes for region at 1733237148603Disabling writes for close at 1733237148604 (+1 ms)Obtaining lock to block concurrent updates at 1733237148604Preparing flush snapshotting stores in 1588230740 at 1733237148604Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733237148605 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733237148606 (+1 ms)Flushing 1588230740/info: creating writer at 1733237148606Flushing 1588230740/info: appending metadata at 1733237148629 (+23 ms)Flushing 1588230740/info: closing flushed file at 1733237148629Flushing 1588230740/ns: creating writer at 1733237148644 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733237148658 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733237148658Flushing 1588230740/table: creating writer at 1733237148674 (+16 ms)Flushing 1588230740/table: appending metadata at 1733237148692 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733237148692Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e64e22d: reopening flushed file at 1733237148707 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a9c0d88: reopening flushed file at 1733237148717 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@403ced32: reopening flushed file at 1733237148729 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1733237148740 (+11 ms)Writing region close event to WAL at 1733237148741 (+1 ms)Running coprocessor post-close hooks at 1733237148747 (+6 ms)Closed at 1733237148747 2024-12-03T14:45:48,747 DEBUG [RS_CLOSE_META-regionserver/a5d22df9eca2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T14:45:48,804 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(976): stopping server a5d22df9eca2,34577,1733237146324; all regions closed. 
2024-12-03T14:45:48,804 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,805 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,805 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,805 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741836_1012 (size=2751) 2024-12-03T14:45:48,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741836_1012 (size=2751) 2024-12-03T14:45:48,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741836_1012 (size=2751) 2024-12-03T14:45:48,811 DEBUG [RS:2;a5d22df9eca2:34577 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/oldWALs 2024-12-03T14:45:48,811 INFO [RS:2;a5d22df9eca2:34577 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a5d22df9eca2%2C34577%2C1733237146324.meta:.meta(num 1733237147319) 2024-12-03T14:45:48,811 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,812 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,812 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,812 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,812 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:48,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741835_1011 (size=1298) 2024-12-03T14:45:48,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741835_1011 (size=1298) 2024-12-03T14:45:48,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741835_1011 (size=1298) 2024-12-03T14:45:48,818 DEBUG [RS:2;a5d22df9eca2:34577 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/oldWALs 2024-12-03T14:45:48,818 INFO [RS:2;a5d22df9eca2:34577 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a5d22df9eca2%2C34577%2C1733237146324:(num 1733237146886) 2024-12-03T14:45:48,818 DEBUG [RS:2;a5d22df9eca2:34577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:48,818 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:48,818 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T14:45:48,818 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.ChoreService(370): Chore service for: regionserver/a5d22df9eca2:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T14:45:48,818 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T14:45:48,819 INFO [regionserver/a5d22df9eca2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T14:45:48,819 INFO [RS:2;a5d22df9eca2:34577 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34577 2024-12-03T14:45:48,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:48,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a5d22df9eca2,34577,1733237146324 2024-12-03T14:45:48,821 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T14:45:48,822 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a5d22df9eca2,34577,1733237146324] 2024-12-03T14:45:48,823 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a5d22df9eca2,34577,1733237146324 already deleted, retry=false 2024-12-03T14:45:48,823 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a5d22df9eca2,34577,1733237146324 expired; onlineServers=1 2024-12-03T14:45:48,923 INFO [RS:2;a5d22df9eca2:34577 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T14:45:48,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:48,923 INFO [RS:2;a5d22df9eca2:34577 {}] regionserver.HRegionServer(1031): Exiting; stopping=a5d22df9eca2,34577,1733237146324; zookeeper connection closed. 2024-12-03T14:45:48,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34577-0x100a08b5e8a0003, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:48,923 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@29f16ad3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@29f16ad3 2024-12-03T14:45:49,017 DEBUG [RS:1;a5d22df9eca2:37509 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/oldWALs 2024-12-03T14:45:49,017 INFO [RS:1;a5d22df9eca2:37509 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a5d22df9eca2%2C37509%2C1733237146269:(num 1733237146886) 2024-12-03T14:45:49,017 DEBUG [RS:1;a5d22df9eca2:37509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T14:45:49,017 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T14:45:49,017 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T14:45:49,018 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.ChoreService(370): Chore service for: regionserver/a5d22df9eca2:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T14:45:49,018 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-03T14:45:49,018 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T14:45:49,018 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T14:45:49,018 INFO [regionserver/a5d22df9eca2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T14:45:49,018 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T14:45:49,018 INFO [RS:1;a5d22df9eca2:37509 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37509 2024-12-03T14:45:49,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T14:45:49,019 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a5d22df9eca2,37509,1733237146269 2024-12-03T14:45:49,019 INFO [RS:1;a5d22df9eca2:37509 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T14:45:49,020 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a5d22df9eca2,37509,1733237146269] 2024-12-03T14:45:49,021 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a5d22df9eca2,37509,1733237146269 already deleted, retry=false 2024-12-03T14:45:49,021 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a5d22df9eca2,37509,1733237146269 expired; onlineServers=0 2024-12-03T14:45:49,021 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a5d22df9eca2,45807,1733237146162' ***** 2024-12-03T14:45:49,021 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T14:45:49,021 INFO [M:0;a5d22df9eca2:45807 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T14:45:49,021 INFO [M:0;a5d22df9eca2:45807 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T14:45:49,022 DEBUG [M:0;a5d22df9eca2:45807 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T14:45:49,022 DEBUG [M:0;a5d22df9eca2:45807 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T14:45:49,022 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T14:45:49,022 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.large.0-1733237146647 {}] cleaner.HFileCleaner(306): Exit Thread[master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.large.0-1733237146647,5,FailOnTimeoutGroup] 2024-12-03T14:45:49,022 DEBUG [master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.small.0-1733237146649 {}] cleaner.HFileCleaner(306): Exit Thread[master/a5d22df9eca2:0:becomeActiveMaster-HFileCleaner.small.0-1733237146649,5,FailOnTimeoutGroup] 2024-12-03T14:45:49,022 INFO [M:0;a5d22df9eca2:45807 {}] hbase.ChoreService(370): Chore service for: master/a5d22df9eca2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T14:45:49,022 INFO [M:0;a5d22df9eca2:45807 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T14:45:49,022 DEBUG [M:0;a5d22df9eca2:45807 {}] master.HMaster(1795): Stopping service threads 2024-12-03T14:45:49,022 INFO [M:0;a5d22df9eca2:45807 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T14:45:49,022 INFO [M:0;a5d22df9eca2:45807 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T14:45:49,022 INFO [M:0;a5d22df9eca2:45807 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T14:45:49,022 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T14:45:49,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T14:45:49,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T14:45:49,023 DEBUG [M:0;a5d22df9eca2:45807 {}] zookeeper.ZKUtil(347): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T14:45:49,023 WARN [M:0;a5d22df9eca2:45807 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T14:45:49,024 INFO [M:0;a5d22df9eca2:45807 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/.lastflushedseqids 2024-12-03T14:45:49,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741843_1019 (size=127) 2024-12-03T14:45:49,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741843_1019 (size=127) 2024-12-03T14:45:49,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741843_1019 (size=127) 2024-12-03T14:45:49,034 INFO [M:0;a5d22df9eca2:45807 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T14:45:49,034 INFO [M:0;a5d22df9eca2:45807 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T14:45:49,034 DEBUG 
[M:0;a5d22df9eca2:45807 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T14:45:49,034 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:49,034 DEBUG [M:0;a5d22df9eca2:45807 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:49,034 DEBUG [M:0;a5d22df9eca2:45807 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T14:45:49,034 DEBUG [M:0;a5d22df9eca2:45807 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:49,034 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-03T14:45:49,051 DEBUG [M:0;a5d22df9eca2:45807 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8611c3285f994746ae2ae01b73a80d7c is 82, key is hbase:meta,,1/info:regioninfo/1733237147433/Put/seqid=0 2024-12-03T14:45:49,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741844_1020 (size=5672) 2024-12-03T14:45:49,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741844_1020 (size=5672) 2024-12-03T14:45:49,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741844_1020 (size=5672) 2024-12-03T14:45:49,059 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8611c3285f994746ae2ae01b73a80d7c 2024-12-03T14:45:49,092 DEBUG [M:0;a5d22df9eca2:45807 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5c099cd34cf24458abc104d43b82fba7 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733237148025/Put/seqid=0 2024-12-03T14:45:49,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741845_1021 (size=6440) 2024-12-03T14:45:49,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741845_1021 (size=6440) 2024-12-03T14:45:49,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741845_1021 (size=6440) 2024-12-03T14:45:49,112 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5c099cd34cf24458abc104d43b82fba7 2024-12-03T14:45:49,120 INFO 
[RS:1;a5d22df9eca2:37509 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T14:45:49,120 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:49,120 INFO [RS:1;a5d22df9eca2:37509 {}] regionserver.HRegionServer(1031): Exiting; stopping=a5d22df9eca2,37509,1733237146269; zookeeper connection closed. 2024-12-03T14:45:49,120 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37509-0x100a08b5e8a0002, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:49,125 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@192a2791 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@192a2791 2024-12-03T14:45:49,126 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-03T14:45:49,155 DEBUG [M:0;a5d22df9eca2:45807 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3dc4bd15a2154f43953def289a1c5d97 is 69, key is a5d22df9eca2,34577,1733237146324/rs:state/1733237146702/Put/seqid=0 2024-12-03T14:45:49,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741846_1022 (size=5294) 2024-12-03T14:45:49,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741846_1022 (size=5294) 2024-12-03T14:45:49,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741846_1022 (size=5294) 2024-12-03T14:45:49,177 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3dc4bd15a2154f43953def289a1c5d97 2024-12-03T14:45:49,190 DEBUG [M:0;a5d22df9eca2:45807 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8611c3285f994746ae2ae01b73a80d7c as hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8611c3285f994746ae2ae01b73a80d7c 2024-12-03T14:45:49,200 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8611c3285f994746ae2ae01b73a80d7c, entries=8, sequenceid=72, filesize=5.5 K 2024-12-03T14:45:49,203 DEBUG [M:0;a5d22df9eca2:45807 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5c099cd34cf24458abc104d43b82fba7 as 
hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5c099cd34cf24458abc104d43b82fba7 2024-12-03T14:45:49,211 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5c099cd34cf24458abc104d43b82fba7, entries=8, sequenceid=72, filesize=6.3 K 2024-12-03T14:45:49,213 DEBUG [M:0;a5d22df9eca2:45807 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3dc4bd15a2154f43953def289a1c5d97 as hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3dc4bd15a2154f43953def289a1c5d97 2024-12-03T14:45:49,222 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40139/user/jenkins/test-data/999e5286-6ceb-ff40-154f-5edc3a632909/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3dc4bd15a2154f43953def289a1c5d97, entries=3, sequenceid=72, filesize=5.2 K 2024-12-03T14:45:49,224 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 190ms, sequenceid=72, compaction requested=false 2024-12-03T14:45:49,226 INFO [M:0;a5d22df9eca2:45807 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T14:45:49,226 DEBUG [M:0;a5d22df9eca2:45807 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733237149034Disabling compacts and flushes for region at 1733237149034Disabling writes for close at 1733237149034Obtaining lock to block concurrent updates at 1733237149034Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733237149034Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733237149035 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733237149036 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733237149036Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733237149050 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733237149051 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733237149066 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733237149091 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733237149091Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733237149120 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733237149154 (+34 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733237149154Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c3cef80: reopening flushed file at 1733237149189 (+35 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54cb40db: reopening flushed file at 1733237149201 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7290a2b1: reopening flushed file at 1733237149211 (+10 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 190ms, sequenceid=72, compaction requested=false at 1733237149224 (+13 ms)Writing region close event to WAL at 1733237149226 (+2 ms)Closed at 1733237149226 2024-12-03T14:45:49,226 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:49,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:49,227 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:49,227 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:49,227 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T14:45:49,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45965 is added to blk_1073741830_1006 (size=32683) 2024-12-03T14:45:49,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35547 is added to blk_1073741830_1006 (size=32683) 2024-12-03T14:45:49,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43853 is added to blk_1073741830_1006 (size=32683) 2024-12-03T14:45:49,232 INFO [M:0;a5d22df9eca2:45807 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T14:45:49,232 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T14:45:49,233 INFO [M:0;a5d22df9eca2:45807 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45807 2024-12-03T14:45:49,233 INFO [M:0;a5d22df9eca2:45807 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T14:45:49,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:49,335 INFO [M:0;a5d22df9eca2:45807 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T14:45:49,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45807-0x100a08b5e8a0000, quorum=127.0.0.1:53112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T14:45:49,342 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f50f857{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:49,343 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7eeef71e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T14:45:49,343 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T14:45:49,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40b03519{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T14:45:49,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44968fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.log.dir/,STOPPED} 2024-12-03T14:45:49,349 WARN [BP-1770701912-172.17.0.2-1733237145441 heartbeating to localhost/127.0.0.1:40139 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T14:45:49,349 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T14:45:49,349 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T14:45:49,349 WARN [BP-1770701912-172.17.0.2-1733237145441 heartbeating to localhost/127.0.0.1:40139 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1770701912-172.17.0.2-1733237145441 (Datanode Uuid aefcb56c-3079-4c8c-8591-48e0f889c700) service to localhost/127.0.0.1:40139 2024-12-03T14:45:49,350 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data5/current/BP-1770701912-172.17.0.2-1733237145441 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:49,350 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data6/current/BP-1770701912-172.17.0.2-1733237145441 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:49,350 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T14:45:49,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46f4cd0a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:49,358 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@700b2317{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T14:45:49,358 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T14:45:49,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55cf3a01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T14:45:49,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b4148d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.log.dir/,STOPPED} 2024-12-03T14:45:49,361 WARN [BP-1770701912-172.17.0.2-1733237145441 heartbeating to localhost/127.0.0.1:40139 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T14:45:49,361 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T14:45:49,361 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T14:45:49,361 WARN [BP-1770701912-172.17.0.2-1733237145441 heartbeating to localhost/127.0.0.1:40139 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1770701912-172.17.0.2-1733237145441 (Datanode Uuid f741b13e-b0e9-4f2b-8805-76233ceb2a1a) service to localhost/127.0.0.1:40139 2024-12-03T14:45:49,362 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data3/current/BP-1770701912-172.17.0.2-1733237145441 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:49,362 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data4/current/BP-1770701912-172.17.0.2-1733237145441 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:49,362 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T14:45:49,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18f854cf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T14:45:49,369 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70fdfe33{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T14:45:49,370 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T14:45:49,370 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72f96008{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T14:45:49,370 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c4ebd49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.log.dir/,STOPPED} 2024-12-03T14:45:49,373 WARN [BP-1770701912-172.17.0.2-1733237145441 heartbeating to localhost/127.0.0.1:40139 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T14:45:49,373 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T14:45:49,373 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T14:45:49,373 WARN [BP-1770701912-172.17.0.2-1733237145441 heartbeating to localhost/127.0.0.1:40139 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1770701912-172.17.0.2-1733237145441 (Datanode Uuid 7aa017bd-052e-4b56-bbb5-9271075ea031) service to localhost/127.0.0.1:40139 2024-12-03T14:45:49,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data1/current/BP-1770701912-172.17.0.2-1733237145441 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:49,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/cluster_b7ee057f-3561-35b0-d222-225c4e2be475/data/data2/current/BP-1770701912-172.17.0.2-1733237145441 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T14:45:49,374 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T14:45:49,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15027254{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T14:45:49,380 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4293887f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T14:45:49,380 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T14:45:49,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e58533{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T14:45:49,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ad8d9de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/943f1bb7-fb24-ff52-77e6-332a19b87f96/hadoop.log.dir/,STOPPED} 2024-12-03T14:45:49,391 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T14:45:49,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T14:45:49,429 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=146 (was 88) - Thread LEAK? -, OpenFileDescriptor=521 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=322 (was 333), ProcessCount=11 (was 11), AvailableMemoryMB=4949 (was 5223)