2024-11-11 15:49:00,419 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5ddeb7cb 2024-11-11 15:49:00,446 main DEBUG Took 0.022085 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-11 15:49:00,447 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-11 15:49:00,448 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-11 15:49:00,450 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-11 15:49:00,453 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,470 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-11 15:49:00,610 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,613 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,614 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,615 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,615 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,616 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,617 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,617 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,618 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,619 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,620 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,621 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,623 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,623 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-11 15:49:00,625 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,626 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,626 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,627 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,628 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,629 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,631 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,632 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,633 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,635 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 15:49:00,636 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,637 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-11 15:49:00,642 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 15:49:00,644 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-11 15:49:00,649 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-11 15:49:00,659 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-11 15:49:00,665 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-11 15:49:00,666 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-11 15:49:00,701 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-11 15:49:00,705 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-11 15:49:00,707 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-11 15:49:00,714 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-11 15:49:00,715 main DEBUG createAppenders(={Console}) 2024-11-11 15:49:00,716 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5ddeb7cb initialized 2024-11-11 15:49:00,718 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5ddeb7cb 2024-11-11 15:49:00,718 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5ddeb7cb OK. 2024-11-11 15:49:00,719 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-11 15:49:00,719 main DEBUG OutputStream closed 2024-11-11 15:49:00,720 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-11 15:49:00,720 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-11 15:49:00,721 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@10d307f1 OK 2024-11-11 15:49:00,851 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-11 15:49:00,855 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-11 15:49:00,857 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-11 15:49:00,858 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-11 15:49:00,859 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-11 15:49:00,859 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-11 15:49:00,860 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-11 15:49:00,860 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-11 15:49:00,861 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-11 15:49:00,861 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-11 15:49:00,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-11 15:49:00,868 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-11 15:49:00,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-11 15:49:00,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-11 15:49:00,875 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-11 15:49:00,875 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-11 15:49:00,876 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-11 15:49:00,877 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-11 15:49:00,891 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-11 15:49:00,891 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7b420819) with optional ClassLoader: null 2024-11-11 15:49:00,894 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-11 15:49:00,895 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7b420819] started OK. 2024-11-11T15:49:00,928 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-11 15:49:00,932 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-11 15:49:00,932 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-11T15:49:01,577 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558 2024-11-11T15:49:01,637 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d, deleteOnExit=true 2024-11-11T15:49:01,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/test.cache.data in system properties and HBase conf 2024-11-11T15:49:01,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T15:49:01,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir in system properties and HBase conf 2024-11-11T15:49:01,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T15:49:01,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T15:49:01,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T15:49:01,872 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-11T15:49:02,040 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T15:49:02,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T15:49:02,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T15:49:02,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T15:49:02,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T15:49:02,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T15:49:02,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T15:49:02,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T15:49:02,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T15:49:02,077 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T15:49:02,077 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/nfs.dump.dir in system properties and HBase conf 2024-11-11T15:49:02,078 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/java.io.tmpdir in system properties and HBase conf 2024-11-11T15:49:02,078 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T15:49:02,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T15:49:02,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T15:49:03,609 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-11T15:49:03,804 INFO [Time-limited test {}] log.Log(170): Logging initialized @5058ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-11T15:49:03,937 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:04,037 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T15:49:04,099 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T15:49:04,100 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T15:49:04,103 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T15:49:04,130 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:04,136 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27f57d6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir/,AVAILABLE} 2024-11-11T15:49:04,138 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@410292bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T15:49:04,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ea2dca6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/java.io.tmpdir/jetty-localhost-35993-hadoop-hdfs-3_4_1-tests_jar-_-any-13095385652913349806/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T15:49:04,458 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ed9b238{HTTP/1.1, (http/1.1)}{localhost:35993} 2024-11-11T15:49:04,459 INFO [Time-limited test {}] server.Server(415): Started @5714ms 2024-11-11T15:49:05,153 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:05,174 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T15:49:05,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T15:49:05,193 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T15:49:05,193 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T15:49:05,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@716a4960{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir/,AVAILABLE} 2024-11-11T15:49:05,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d388052{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T15:49:05,384 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ea802a5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/java.io.tmpdir/jetty-localhost-43649-hadoop-hdfs-3_4_1-tests_jar-_-any-5472919461529471542/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T15:49:05,388 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24c8c1e0{HTTP/1.1, (http/1.1)}{localhost:43649} 2024-11-11T15:49:05,388 INFO [Time-limited test {}] server.Server(415): Started @6644ms 2024-11-11T15:49:05,493 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T15:49:05,766 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:05,779 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T15:49:05,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T15:49:05,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T15:49:05,801 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T15:49:05,805 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@448e9acc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir/,AVAILABLE} 2024-11-11T15:49:05,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a445e53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T15:49:05,999 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@328e0d16{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/java.io.tmpdir/jetty-localhost-33669-hadoop-hdfs-3_4_1-tests_jar-_-any-13805143847375159138/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T15:49:06,000 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61998c4c{HTTP/1.1, (http/1.1)}{localhost:33669} 2024-11-11T15:49:06,000 INFO [Time-limited test {}] server.Server(415): Started @7256ms 2024-11-11T15:49:06,009 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T15:49:06,180 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:06,193 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T15:49:06,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T15:49:06,218 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T15:49:06,218 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T15:49:06,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e3331e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir/,AVAILABLE} 2024-11-11T15:49:06,221 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30f23eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T15:49:06,273 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data1/current/BP-1086051422-172.17.0.3-1731340143188/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:06,275 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data3/current/BP-1086051422-172.17.0.3-1731340143188/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:06,276 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data2/current/BP-1086051422-172.17.0.3-1731340143188/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:06,280 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data4/current/BP-1086051422-172.17.0.3-1731340143188/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:06,356 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T15:49:06,360 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T15:49:06,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6cced15c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/java.io.tmpdir/jetty-localhost-41187-hadoop-hdfs-3_4_1-tests_jar-_-any-5129224537812407218/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T15:49:06,416 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38003bf3{HTTP/1.1, (http/1.1)}{localhost:41187} 2024-11-11T15:49:06,416 INFO [Time-limited test {}] server.Server(415): Started @7671ms 2024-11-11T15:49:06,419 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T15:49:06,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa13e345dc7f3c6a with lease ID 0xe1892bbb2c038137: Processing first storage report for DS-494bae4f-2893-45b5-a6db-0c68f398cc5e from datanode DatanodeRegistration(127.0.0.1:39241, datanodeUuid=cd03cfc1-bded-4164-ba54-85ee07d32e58, infoPort=37769, infoSecurePort=0, ipcPort=43085, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188) 2024-11-11T15:49:06,484 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa13e345dc7f3c6a with lease ID 0xe1892bbb2c038137: from storage DS-494bae4f-2893-45b5-a6db-0c68f398cc5e node DatanodeRegistration(127.0.0.1:39241, datanodeUuid=cd03cfc1-bded-4164-ba54-85ee07d32e58, infoPort=37769, infoSecurePort=0, ipcPort=43085, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T15:49:06,484 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf039a25276da6d25 with lease ID 0xe1892bbb2c038138: Processing first storage report for DS-6777dac3-fbf5-40c8-bdda-b3e5a7e2085f from datanode DatanodeRegistration(127.0.0.1:36465, datanodeUuid=8637a192-734f-4ac6-ace3-6fffce437bb3, infoPort=41835, infoSecurePort=0, ipcPort=40333, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188) 2024-11-11T15:49:06,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf039a25276da6d25 with lease ID 0xe1892bbb2c038138: from storage DS-6777dac3-fbf5-40c8-bdda-b3e5a7e2085f node DatanodeRegistration(127.0.0.1:36465, datanodeUuid=8637a192-734f-4ac6-ace3-6fffce437bb3, infoPort=41835, infoSecurePort=0, ipcPort=40333, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:06,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf039a25276da6d25 with lease ID 0xe1892bbb2c038138: Processing first storage report for DS-ff7547d6-9551-44c3-91a8-dd99496edc8c from datanode DatanodeRegistration(127.0.0.1:36465, datanodeUuid=8637a192-734f-4ac6-ace3-6fffce437bb3, infoPort=41835, infoSecurePort=0, ipcPort=40333, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188) 2024-11-11T15:49:06,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf039a25276da6d25 with lease ID 
0xe1892bbb2c038138: from storage DS-ff7547d6-9551-44c3-91a8-dd99496edc8c node DatanodeRegistration(127.0.0.1:36465, datanodeUuid=8637a192-734f-4ac6-ace3-6fffce437bb3, infoPort=41835, infoSecurePort=0, ipcPort=40333, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:06,486 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa13e345dc7f3c6a with lease ID 0xe1892bbb2c038137: Processing first storage report for DS-04aed72c-16e4-4904-8c4f-292005cf083d from datanode DatanodeRegistration(127.0.0.1:39241, datanodeUuid=cd03cfc1-bded-4164-ba54-85ee07d32e58, infoPort=37769, infoSecurePort=0, ipcPort=43085, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188) 2024-11-11T15:49:06,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa13e345dc7f3c6a with lease ID 0xe1892bbb2c038137: from storage DS-04aed72c-16e4-4904-8c4f-292005cf083d node DatanodeRegistration(127.0.0.1:39241, datanodeUuid=cd03cfc1-bded-4164-ba54-85ee07d32e58, infoPort=37769, infoSecurePort=0, ipcPort=43085, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:06,636 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data5/current/BP-1086051422-172.17.0.3-1731340143188/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:06,638 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data6/current/BP-1086051422-172.17.0.3-1731340143188/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:06,738 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T15:49:06,749 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14295da4adab6f56 with lease ID 0xe1892bbb2c038139: Processing first storage report for DS-9d7f9eef-a4d8-47c0-921d-83c97f6fe7a9 from datanode DatanodeRegistration(127.0.0.1:42479, datanodeUuid=a85cba08-0cf2-4670-82cd-3d9b8fc8ddff, infoPort=44127, infoSecurePort=0, ipcPort=43459, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188) 2024-11-11T15:49:06,750 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14295da4adab6f56 with lease ID 0xe1892bbb2c038139: from storage DS-9d7f9eef-a4d8-47c0-921d-83c97f6fe7a9 node DatanodeRegistration(127.0.0.1:42479, datanodeUuid=a85cba08-0cf2-4670-82cd-3d9b8fc8ddff, infoPort=44127, infoSecurePort=0, ipcPort=43459, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:06,750 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14295da4adab6f56 with lease ID 0xe1892bbb2c038139: Processing first storage report for DS-2e9cf3ff-4cb1-4926-b6de-9ec4be6fec16 from datanode DatanodeRegistration(127.0.0.1:42479, datanodeUuid=a85cba08-0cf2-4670-82cd-3d9b8fc8ddff, infoPort=44127, infoSecurePort=0, ipcPort=43459, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188) 2024-11-11T15:49:06,750 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14295da4adab6f56 with lease ID 0xe1892bbb2c038139: from storage DS-2e9cf3ff-4cb1-4926-b6de-9ec4be6fec16 node DatanodeRegistration(127.0.0.1:42479, datanodeUuid=a85cba08-0cf2-4670-82cd-3d9b8fc8ddff, infoPort=44127, infoSecurePort=0, ipcPort=43459, storageInfo=lv=-57;cid=testClusterID;nsid=209398083;c=1731340143188), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:07,076 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558 2024-11-11T15:49:07,190 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-11-11T15:49:07,301 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=155, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=1326, ProcessCount=11, AvailableMemoryMB=2445 2024-11-11T15:49:07,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T15:49:07,327 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-11T15:49:07,509 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/zookeeper_0, clientPort=49707, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T15:49:07,523 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49707 2024-11-11T15:49:07,543 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:07,548 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:07,778 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:07,778 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:07,870 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:37754 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37754 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:07,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-11T15:49:08,338 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T15:49:08,352 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04 with version=8 2024-11-11T15:49:08,352 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/hbase-staging 2024-11-11T15:49:08,492 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-11T15:49:08,857 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9a1fddc00362:0 server-side Connection retries=45 2024-11-11T15:49:08,871 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:08,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:08,877 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T15:49:08,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:08,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T15:49:09,083 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T15:49:09,189 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-11T15:49:09,203 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-11T15:49:09,210 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T15:49:09,245 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 25045 (auto-detected) 2024-11-11T15:49:09,246 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-11T15:49:09,270 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41091 2024-11-11T15:49:09,298 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41091 connecting to ZooKeeper ensemble=127.0.0.1:49707 2024-11-11T15:49:09,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:410910x0, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T15:49:09,340 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41091-0x1019805905b0000 connected 2024-11-11T15:49:09,383 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:09,386 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:09,397 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:09,402 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04, hbase.cluster.distributed=false 2024-11-11T15:49:09,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T15:49:09,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41091 2024-11-11T15:49:09,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41091 2024-11-11T15:49:09,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41091 2024-11-11T15:49:09,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41091 2024-11-11T15:49:09,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41091 2024-11-11T15:49:09,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-11T15:49:09,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-11T15:49:09,601 INFO [Time-limited test {}] 
client.ConnectionUtils(128): regionserver/9a1fddc00362:0 server-side Connection retries=45 2024-11-11T15:49:09,603 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:09,603 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:09,604 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T15:49:09,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:09,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T15:49:09,607 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T15:49:09,610 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T15:49:09,612 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37975 2024-11-11T15:49:09,614 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37975 connecting to ZooKeeper ensemble=127.0.0.1:49707 2024-11-11T15:49:09,615 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:09,618 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:09,636 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379750x0, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T15:49:09,638 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379750x0, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:09,640 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37975-0x1019805905b0001 connected 2024-11-11T15:49:09,643 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T15:49:09,656 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T15:49:09,661 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T15:49:09,679 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does 
not yet exist, /hbase/acl 2024-11-11T15:49:09,687 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37975 2024-11-11T15:49:09,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37975 2024-11-11T15:49:09,690 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37975 2024-11-11T15:49:09,703 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37975 2024-11-11T15:49:09,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37975 2024-11-11T15:49:09,732 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9a1fddc00362:0 server-side Connection retries=45 2024-11-11T15:49:09,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:09,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:09,733 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T15:49:09,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:09,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T15:49:09,734 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T15:49:09,734 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T15:49:09,737 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46235 2024-11-11T15:49:09,740 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46235 connecting to ZooKeeper ensemble=127.0.0.1:49707 2024-11-11T15:49:09,741 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:09,745 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:09,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462350x0, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T15:49:09,763 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:46235-0x1019805905b0002 connected 2024-11-11T15:49:09,763 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:09,764 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T15:49:09,768 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T15:49:09,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T15:49:09,779 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T15:49:09,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46235 2024-11-11T15:49:09,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46235 2024-11-11T15:49:09,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46235 2024-11-11T15:49:09,802 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46235 2024-11-11T15:49:09,803 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46235 2024-11-11T15:49:09,830 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9a1fddc00362:0 server-side Connection retries=45 2024-11-11T15:49:09,830 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:09,831 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:09,831 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T15:49:09,831 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:09,831 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T15:49:09,832 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T15:49:09,832 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T15:49:09,834 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35713 
2024-11-11T15:49:09,838 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35713 connecting to ZooKeeper ensemble=127.0.0.1:49707 2024-11-11T15:49:09,840 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:09,844 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:09,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:357130x0, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T15:49:09,856 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:357130x0, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:09,856 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35713-0x1019805905b0003 connected 2024-11-11T15:49:09,857 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T15:49:09,858 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T15:49:09,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T15:49:09,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T15:49:09,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35713 2024-11-11T15:49:09,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35713 2024-11-11T15:49:09,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35713 2024-11-11T15:49:09,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35713 2024-11-11T15:49:09,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35713 2024-11-11T15:49:09,903 DEBUG [M:0;9a1fddc00362:41091 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9a1fddc00362:41091 2024-11-11T15:49:09,904 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9a1fddc00362,41091,1731340148573 2024-11-11T15:49:09,914 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:09,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:09,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:09,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:09,918 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9a1fddc00362,41091,1731340148573 2024-11-11T15:49:09,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:09,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T15:49:09,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T15:49:09,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:09,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:09,960 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T15:49:09,960 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:09,962 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T15:49:09,967 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9a1fddc00362,41091,1731340148573 from backup master directory 2024-11-11T15:49:09,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9a1fddc00362,41091,1731340148573 2024-11-11T15:49:09,972 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:09,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:09,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:09,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:09,978 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T15:49:09,979 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9a1fddc00362,41091,1731340148573 2024-11-11T15:49:09,983 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-11T15:49:09,987 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-11T15:49:10,062 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/hbase.id] with ID: f3a4f029-de6a-4cce-b975-9a08c2c3053f 2024-11-11T15:49:10,062 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/.tmp/hbase.id 2024-11-11T15:49:10,081 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:10,081 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:10,086 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:45168 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45168 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:10,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-11T15:49:10,499 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T15:49:10,500 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/.tmp/hbase.id]:[hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/hbase.id] 2024-11-11T15:49:10,559 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:10,565 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T15:49:10,593 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 24ms. 2024-11-11T15:49:10,602 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:10,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:10,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:10,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:10,628 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:10,628 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:10,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:45192 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45192 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:10,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-11T15:49:10,658 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
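The repeated DFSStripedOutputStream warnings and DataXceiver "Premature EOF" errors above come from writing under the RS-3-2-1024k erasure coding policy on a mini-cluster that has fewer datanodes than the policy's stripe width (3 data + 2 parity = 5 nodes). The following is a hedged sketch, not HBase code, that uses the public Hadoop client API to compare a directory's EC policy against the number of live datanodes; the namenode address is taken from the log, the path and the check itself are illustrative assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CheckEcCapacity {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:36701"); // namenode from the log
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    Path dir = new Path("/user/jenkins/test-data");      // illustrative directory
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
    DatanodeInfo[] liveNodes = dfs.getDataNodeStats();

    if (policy == null) {
      System.out.println(dir + " uses plain replication");
      return;
    }
    int stripeWidth = policy.getNumDataUnits() + policy.getNumParityUnits();
    System.out.printf("policy=%s needs %d datanodes, cluster has %d live%n",
        policy.getName(), stripeWidth, liveNodes.length);
    if (liveNodes.length < stripeWidth) {
      // Matches the log: some parity blocks cannot be placed, so writes are degraded.
      System.out.println("Not enough datanodes for a full stripe; "
          + "run 'hdfs ec -verifyClusterSetup' or choose a narrower policy.");
    }
    dfs.close();
  }
}
```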
2024-11-11T15:49:10,678 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T15:49:10,681 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T15:49:10,690 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T15:49:10,728 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:10,728 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:10,749 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:51728 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:39241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51728 dst: /127.0.0.1:39241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:10,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-11T15:49:10,762 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T15:49:10,790 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store 2024-11-11T15:49:10,814 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:10,814 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:10,820 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:45220 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45220 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:10,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-11T15:49:10,830 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T15:49:10,836 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-11T15:49:10,839 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:10,841 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T15:49:10,841 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:10,842 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:10,844 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-11T15:49:10,844 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:10,844 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:10,845 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731340150841Disabling compacts and flushes for region at 1731340150841Disabling writes for close at 1731340150844 (+3 ms)Writing region close event to WAL at 1731340150844Closed at 1731340150844 2024-11-11T15:49:10,848 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/.initializing 2024-11-11T15:49:10,848 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/WALs/9a1fddc00362,41091,1731340148573 2024-11-11T15:49:10,861 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T15:49:10,890 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C41091%2C1731340148573, suffix=, logDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/WALs/9a1fddc00362,41091,1731340148573, archiveDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/oldWALs, maxLogs=10 2024-11-11T15:49:10,925 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/WALs/9a1fddc00362,41091,1731340148573/9a1fddc00362%2C41091%2C1731340148573.1731340150895, exclude list is [], retry=0 2024-11-11T15:49:10,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:10,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-494bae4f-2893-45b5-a6db-0c68f398cc5e,DISK] 2024-11-11T15:49:10,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42479,DS-9d7f9eef-a4d8-47c0-921d-83c97f6fe7a9,DISK] 2024-11-11T15:49:10,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36465,DS-6777dac3-fbf5-40c8-bdda-b3e5a7e2085f,DISK] 2024-11-11T15:49:10,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-11T15:49:11,018 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/WALs/9a1fddc00362,41091,1731340148573/9a1fddc00362%2C41091%2C1731340148573.1731340150895 2024-11-11T15:49:11,021 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37769:37769),(127.0.0.1/127.0.0.1:41835:41835),(127.0.0.1/127.0.0.1:44127:44127)] 2024-11-11T15:49:11,022 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T15:49:11,022 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:11,026 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,028 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,129 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T15:49:11,134 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:11,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:11,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,141 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T15:49:11,142 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:11,145 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T15:49:11,145 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,150 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T15:49:11,150 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:11,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T15:49:11,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T15:49:11,157 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:11,159 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T15:49:11,159 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,172 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,174 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,181 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,182 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,187 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T15:49:11,194 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:11,248 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T15:49:11,250 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59257713, jitterRate=-0.11699126660823822}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T15:49:11,258 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731340151046Initializing all the Stores at 1731340151050 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340151050Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340151051 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340151051Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340151051Cleaning up temporary data from old regions at 1731340151182 (+131 ms)Region opened successfully at 1731340151258 (+76 ms) 2024-11-11T15:49:11,260 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T15:49:11,317 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d7177b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9a1fddc00362/172.17.0.3:0 2024-11-11T15:49:11,362 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T15:49:11,379 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T15:49:11,380 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T15:49:11,384 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T15:49:11,386 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-11T15:49:11,394 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-11-11T15:49:11,394 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T15:49:11,426 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T15:49:11,436 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T15:49:11,529 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T15:49:11,533 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T15:49:11,535 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T15:49:11,596 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T15:49:11,600 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T15:49:11,606 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T15:49:11,650 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T15:49:11,653 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T15:49:11,696 DEBUG [master/9a1fddc00362:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T15:49:11,717 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T15:49:11,765 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T15:49:11,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:11,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:11,812 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:11,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:11,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:11,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:11,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:11,812 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:11,815 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9a1fddc00362,41091,1731340148573, sessionid=0x1019805905b0000, setting cluster-up flag (Was=false) 2024-11-11T15:49:11,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:11,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:11,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
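Two of the messages in this stretch carry explicit configuration hints: the StoreHotnessProtector WARN earlier says the protector stays disabled unless hbase.region.store.parallel.put.limit is set above 0, and the SimpleRegionNormalizer INFO line reports hbase.normalizer.merge.min_region_size.mb being raised from 0 to 1. A minimal sketch of setting those same keys programmatically for a test configuration follows; the keys are copied from the log, while the values chosen here are only illustrations, not recommendations.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TestClusterTuning {
  public static Configuration tunedConf() {
    // Loads hbase-default.xml / hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();

    // From the StoreHotnessProtector WARN: it stays disabled unless this is > 0.
    // The value 10 is purely illustrative.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);

    // From the normalizer INFO line: minimum region size (MB) considered for merges.
    conf.setInt("hbase.normalizer.merge.min_region_size.mb", 1);

    return conf;
  }
}
```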
2024-11-11T15:49:11,958 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:12,058 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T15:49:12,061 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9a1fddc00362,41091,1731340148573 2024-11-11T15:49:12,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:12,135 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:12,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:12,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:12,358 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T15:49:12,360 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9a1fddc00362,41091,1731340148573 2024-11-11T15:49:12,367 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T15:49:12,398 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(746): ClusterId : f3a4f029-de6a-4cce-b975-9a08c2c3053f 2024-11-11T15:49:12,398 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(746): ClusterId : f3a4f029-de6a-4cce-b975-9a08c2c3053f 2024-11-11T15:49:12,398 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(746): ClusterId : f3a4f029-de6a-4cce-b975-9a08c2c3053f 2024-11-11T15:49:12,401 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T15:49:12,401 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T15:49:12,401 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T15:49:12,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-11T15:49:12,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-11T15:49:12,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-11T15:49:12,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-11T15:49:12,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-11T15:49:12,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-11T15:49:12,466 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T15:49:12,475 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T15:49:12,476 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T15:49:12,476 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T15:49:12,477 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T15:49:12,479 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T15:49:12,488 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
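The StochasticLoadBalancer record above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000, plus slop=0.2 from BaseLoadBalancer. As a rough illustration only, these tunables are normally supplied through the HBase configuration; the property names below follow the hbase.master.balancer.stochastic.* convention and are assumptions to verify against the HBase version in use, not values taken from this run's config.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Start from hbase-site.xml / hbase-default.xml on the classpath.
            Configuration conf = HBaseConfiguration.create();

            // Tunables corresponding to the values logged by StochasticLoadBalancer
            // (property names assumed from the hbase.master.balancer.stochastic.* family).
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);

            // "slop=0.2" in the BaseLoadBalancer line is the allowed load deviation.
            conf.setFloat("hbase.regions.slop", 0.2f);

            System.out.println("maxSteps = "
                + conf.getInt("hbase.master.balancer.stochastic.maxSteps", -1));
        }
    }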
2024-11-11T15:49:12,497 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9a1fddc00362,41091,1731340148573 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T15:49:12,590 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T15:49:12,590 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T15:49:12,590 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T15:49:12,591 DEBUG [RS:2;9a1fddc00362:35713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74f4a132, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9a1fddc00362/172.17.0.3:0 2024-11-11T15:49:12,596 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T15:49:12,597 DEBUG [RS:0;9a1fddc00362:37975 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28184766, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9a1fddc00362/172.17.0.3:0 2024-11-11T15:49:12,609 DEBUG [RS:2;9a1fddc00362:35713 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;9a1fddc00362:35713 2024-11-11T15:49:12,614 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T15:49:12,614 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T15:49:12,614 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9a1fddc00362:37975 2024-11-11T15:49:12,614 DEBUG [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T15:49:12,614 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T15:49:12,614 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T15:49:12,615 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(832): About to register with Master. 
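The ZKWatcher entries earlier in this run show every process receiving NodeChildrenChanged events for the /hbase base znode on quorum 127.0.0.1:49707. A minimal sketch of how such a child watch is registered and re-armed is shown below; it uses the plain ZooKeeper client API rather than HBase's ZKWatcher, the connect string and base znode are taken from the log, and everything else is illustrative.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ChildWatchSketch {
        // Connect string and base znode as they appear in the log above.
        static final String QUORUM = "127.0.0.1:49707";
        static final String BASE_ZNODE = "/hbase";

        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            // The default watcher here only tracks session state.
            ZooKeeper zk = new ZooKeeper(QUORUM, 30_000, event -> {
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();

            // ZooKeeper watches are one-shot, so the callback re-registers by calling
            // getChildren again, which is effectively what keeps the events flowing.
            watchChildren(zk, BASE_ZNODE);
            Thread.sleep(60_000);   // keep the session alive long enough to observe events
            zk.close();
        }

        static void watchChildren(ZooKeeper zk, String path) throws Exception {
            List<String> children = zk.getChildren(path, event -> {
                if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
                    System.out.println("Received ZooKeeper Event, type=NodeChildrenChanged, path="
                        + event.getPath());
                    try {
                        watchChildren(zk, path);   // re-arm the watch
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            });
            System.out.println("Children of " + path + ": " + children);
        }
    }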
2024-11-11T15:49:12,713 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T15:49:12,714 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(2659): reportForDuty to master=9a1fddc00362,41091,1731340148573 with port=35713, startcode=1731340149829 2024-11-11T15:49:12,714 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(2659): reportForDuty to master=9a1fddc00362,41091,1731340148573 with port=37975, startcode=1731340149555 2024-11-11T15:49:12,714 DEBUG [RS:1;9a1fddc00362:46235 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7346ed34, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9a1fddc00362/172.17.0.3:0 2024-11-11T15:49:12,714 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9a1fddc00362:0, corePoolSize=5, maxPoolSize=5 2024-11-11T15:49:12,715 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9a1fddc00362:0, corePoolSize=5, maxPoolSize=5 2024-11-11T15:49:12,715 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9a1fddc00362:0, corePoolSize=5, maxPoolSize=5 2024-11-11T15:49:12,715 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9a1fddc00362:0, corePoolSize=5, maxPoolSize=5 2024-11-11T15:49:12,715 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9a1fddc00362:0, corePoolSize=10, maxPoolSize=10 2024-11-11T15:49:12,715 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:12,715 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9a1fddc00362:0, corePoolSize=2, maxPoolSize=2 2024-11-11T15:49:12,716 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:12,724 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731340182724 2024-11-11T15:49:12,726 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T15:49:12,726 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T15:49:12,726 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T15:49:12,727 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T15:49:12,729 DEBUG [RS:2;9a1fddc00362:35713 {}] ipc.RpcConnection(159): 
Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T15:49:12,729 DEBUG [RS:0;9a1fddc00362:37975 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T15:49:12,731 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T15:49:12,731 DEBUG [RS:1;9a1fddc00362:46235 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9a1fddc00362:46235 2024-11-11T15:49:12,731 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T15:49:12,731 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T15:49:12,732 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T15:49:12,732 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T15:49:12,732 DEBUG [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T15:49:12,732 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T15:49:12,733 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(2659): reportForDuty to master=9a1fddc00362,41091,1731340148573 with port=46235, startcode=1731340149731 2024-11-11T15:49:12,734 DEBUG [RS:1;9a1fddc00362:46235 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T15:49:12,736 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:12,736 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T15:49:12,736 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:12,740 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T15:49:12,742 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T15:49:12,742 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T15:49:12,743 WARN [IPC Server handler 0 on default port 36701 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly. 2024-11-11T15:49:12,743 WARN [IPC Server handler 0 on default port 36701 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T15:49:12,743 WARN [IPC Server handler 0 on default port 36701 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T15:49:12,743 WARN [IPC Server handler 0 on default port 36701 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T15:49:12,746 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T15:49:12,747 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T15:49:12,750 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.large.0-1731340152749,5,FailOnTimeoutGroup] 2024-11-11T15:49:12,751 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.small.0-1731340152751,5,FailOnTimeoutGroup] 2024-11-11T15:49:12,751 INFO 
[master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:12,752 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T15:49:12,749 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321; retrying up to 10 times org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy47.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(FSTableDescriptors.java:159) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.master.procedure.InitMetaProcedure.writeFsLayout(InitMetaProcedure.java:87) ~[classes/:?] at org.apache.hadoop.hbase.master.procedure.InitMetaProcedure.executeFromState(InitMetaProcedure.java:103) ~[classes/:?] at org.apache.hadoop.hbase.master.procedure.InitMetaProcedure.executeFromState(InitMetaProcedure.java:55) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] 2024-11-11T15:49:12,753 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:12,754 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
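The block-placement warnings preceding this stack trace explicitly suggest enabling DEBUG on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology to see why replica placement fell short. With a Log4j2 setup like the one this test initializes, that can be done in the properties configuration or programmatically; a small programmatic sketch, assuming log4j-core is on the classpath, is:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EnablePlacementDebug {
        public static void main(String[] args) {
            // Raise only the two loggers the NameNode warning points at; everything else
            // keeps the levels from the properties configuration loaded at startup.
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }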
2024-11-11T15:49:12,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-11T15:49:12,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-11T15:49:12,778 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35987, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T15:49:12,778 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59061, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T15:49:12,778 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33409, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T15:49:12,784 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41091 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9a1fddc00362,37975,1731340149555 2024-11-11T15:49:12,787 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41091 {}] master.ServerManager(517): Registering regionserver=9a1fddc00362,37975,1731340149555 2024-11-11T15:49:12,815 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:12,816 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
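Both the failed .tableinfo write and the "Cannot allocate parity block" warnings come down to the RS-3-2-1024k erasure coding policy needing more datanodes and racks than this 3-datanode mini cluster provides, and the log itself points at 'hdfs ec -verifyClusterSetup'. The equivalent check from Java is sketched below against the HDFS client API; the NameNode address and directory are taken from the log and the snippet is only an illustration, not part of the test.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode address taken from the log; adjust for a real cluster.
            DistributedFileSystem dfs =
                (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:36701"), conf);

            Path dir = new Path("/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04");

            // Null means the directory uses plain replication; otherwise this is the policy
            // (e.g. RS-3-2-1024k) whose data+parity width must fit the available datanodes/racks.
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            System.out.println("EC policy on " + dir + ": "
                + (policy == null ? "replication" : policy.getName()));

            // If the cluster cannot satisfy the policy (here: 3 data + 2 parity blocks on
            // 3 datanodes), one option is to fall back to replication for this subtree.
            // dfs.unsetErasureCodingPolicy(dir);
            dfs.close();
        }
    }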
2024-11-11T15:49:12,826 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41091 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9a1fddc00362,46235,1731340149731 2024-11-11T15:49:12,826 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41091 {}] master.ServerManager(517): Registering regionserver=9a1fddc00362,46235,1731340149731 2024-11-11T15:49:12,830 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04 2024-11-11T15:49:12,830 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36701 2024-11-11T15:49:12,830 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T15:49:12,831 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41091 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9a1fddc00362,35713,1731340149829 2024-11-11T15:49:12,832 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41091 {}] master.ServerManager(517): Registering regionserver=9a1fddc00362,35713,1731340149829 2024-11-11T15:49:12,832 DEBUG [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04 2024-11-11T15:49:12,832 DEBUG [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36701 2024-11-11T15:49:12,832 DEBUG [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T15:49:12,836 DEBUG [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04 2024-11-11T15:49:12,836 DEBUG [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36701 2024-11-11T15:49:12,836 DEBUG [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T15:49:12,869 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:45282 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45282 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:12,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-11T15:49:12,877 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T15:49:12,880 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T15:49:12,880 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/.tabledesc/.tableinfo.0000000002.1321 2024-11-11T15:49:12,881 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04 2024-11-11T15:49:12,887 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:12,887 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:12,896 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:59066 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59066 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:12,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-11T15:49:12,906 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
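The hbase:meta descriptor printed above lists four families (info, ns, rep_barrier, table) with ROW_INDEX_V1 data block encoding, ROWCOL bloom filters, IN_MEMORY caching and small block sizes. A rough client-API sketch of building a descriptor with one family configured the same way is below; it targets an ordinary demo table, not hbase:meta, and is meant only to illustrate where those attributes live in the API.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static void main(String[] args) {
            // One family mirroring the 'info' attributes from the logged descriptor.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                            // VERSIONS => '3'
                .setInMemory(true)                            // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                       // BLOCKSIZE => '8192 B (8KB)'
                .setBloomFilterType(BloomType.ROWCOL)         // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build();

            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(info)
                .build();

            System.out.println(td);
        }
    }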
2024-11-11T15:49:12,907 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:12,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T15:49:12,915 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T15:49:12,915 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:12,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:12,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T15:49:12,918 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T15:49:12,918 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:12,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:12,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T15:49:12,953 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T15:49:12,953 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:12,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:12,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T15:49:12,958 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T15:49:12,958 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:12,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:12,959 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T15:49:12,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740 2024-11-11T15:49:12,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740 2024-11-11T15:49:12,965 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-11-11T15:49:12,965 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T15:49:12,966 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T15:49:12,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T15:49:12,989 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T15:49:12,990 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72567086, jitterRate=0.08133384585380554}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T15:49:12,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731340152908Initializing all the Stores at 1731340152911 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340152911Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340152912 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340152912Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340152912Cleaning up temporary data from old regions at 1731340152965 (+53 ms)Region opened successfully at 1731340152992 (+27 ms) 2024-11-11T15:49:12,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T15:49:12,993 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T15:49:12,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T15:49:12,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T15:49:12,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-11-11T15:49:12,994 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T15:49:12,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731340152993Disabling compacts and flushes for region at 1731340152993Disabling writes for close at 1731340152993Writing region close event to WAL at 1731340152994 (+1 ms)Closed at 1731340152994 2024-11-11T15:49:13,013 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T15:49:13,014 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T15:49:13,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T15:49:13,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T15:49:13,081 DEBUG [RS:1;9a1fddc00362:46235 {}] zookeeper.ZKUtil(111): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9a1fddc00362,46235,1731340149731 2024-11-11T15:49:13,081 DEBUG [RS:0;9a1fddc00362:37975 {}] zookeeper.ZKUtil(111): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9a1fddc00362,37975,1731340149555 2024-11-11T15:49:13,081 WARN [RS:0;9a1fddc00362:37975 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T15:49:13,081 WARN [RS:1;9a1fddc00362:46235 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T15:49:13,082 DEBUG [RS:2;9a1fddc00362:35713 {}] zookeeper.ZKUtil(111): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9a1fddc00362,35713,1731340149829 2024-11-11T15:49:13,082 INFO [RS:0;9a1fddc00362:37975 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T15:49:13,082 INFO [RS:1;9a1fddc00362:46235 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T15:49:13,082 WARN [RS:2;9a1fddc00362:35713 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
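Each region server above instantiates AsyncFSWALProvider. Which provider WALFactory picks is configuration-driven; the sketch below shows the usual switch, where the property name hbase.wal.provider and the provider value names are assumptions based on standard HBase configuration and should be verified for the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderChoiceSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // "asyncfs" selects the asynchronous FS-based provider seen in this log;
            // "filesystem" selects the classic FSHLog-based provider, and
            // "multiwal" enables multiple WALs per region server.
            conf.set("hbase.wal.provider", "asyncfs");

            System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
        }
    }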
2024-11-11T15:49:13,082 INFO [RS:2;9a1fddc00362:35713 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T15:49:13,082 DEBUG [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,46235,1731340149731 2024-11-11T15:49:13,082 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,37975,1731340149555 2024-11-11T15:49:13,082 DEBUG [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,35713,1731340149829 2024-11-11T15:49:13,083 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9a1fddc00362,37975,1731340149555] 2024-11-11T15:49:13,083 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9a1fddc00362,46235,1731340149731] 2024-11-11T15:49:13,083 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9a1fddc00362,35713,1731340149829] 2024-11-11T15:49:13,107 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T15:49:13,120 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T15:49:13,136 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T15:49:13,136 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T15:49:13,136 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T15:49:13,151 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T15:49:13,151 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T15:49:13,151 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T15:49:13,158 INFO [RS:0;9a1fddc00362:37975 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T15:49:13,158 INFO [RS:1;9a1fddc00362:46235 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T15:49:13,158 INFO [RS:2;9a1fddc00362:35713 {}] throttle.PressureAwareCompactionThroughputController(131): 
Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T15:49:13,158 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,158 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,158 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,159 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T15:49:13,160 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T15:49:13,164 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T15:49:13,166 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T15:49:13,167 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,168 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,168 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,168 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T15:49:13,168 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,168 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,168 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,169 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T15:49:13,169 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,169 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,169 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
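The RS_* executor services above are all started as named, bounded pools with a fixed corePoolSize/maxPoolSize pair (for example RS_LOG_REPLAY_OPS at 2/2 and RS_SNAPSHOT_OPERATIONS at 3/3). That pattern, one bounded pool per operation type with named worker threads, can be sketched with the plain JDK executor API; this is illustrative only and is not HBase's internal executor.ExecutorService class.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class NamedPoolSketch {
        // Bounded pool with core == max, matching the corePoolSize/maxPoolSize pairs in the log.
        static ThreadPoolExecutor newPool(String name, int size) {
            AtomicInteger seq = new AtomicInteger();
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                size, size, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                r -> new Thread(r, name + "-" + seq.incrementAndGet()));
            pool.allowCoreThreadTimeOut(true);   // let idle workers exit instead of pinning threads
            return pool;
        }

        public static void main(String[] args) throws InterruptedException {
            ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1);
            ThreadPoolExecutor snapshotOps = newPool("RS_SNAPSHOT_OPERATIONS", 3);

            openRegion.execute(() ->
                System.out.println("open region task on " + Thread.currentThread().getName()));
            snapshotOps.execute(() ->
                System.out.println("snapshot task on " + Thread.currentThread().getName()));

            openRegion.shutdown();
            snapshotOps.shutdown();
            openRegion.awaitTermination(5, TimeUnit.SECONDS);
            snapshotOps.awaitTermination(5, TimeUnit.SECONDS);
        }
    }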
2024-11-11T15:49:13,169 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9a1fddc00362:0, corePoolSize=2, maxPoolSize=2 2024-11-11T15:49:13,169 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,169 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,169 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,169 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,169 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,169 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,169 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9a1fddc00362:0, corePoolSize=2, maxPoolSize=2 2024-11-11T15:49:13,170 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9a1fddc00362:0, corePoolSize=2, maxPoolSize=2 2024-11-11T15:49:13,170 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REFRESH_PEER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,170 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:13,171 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:13,171 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:13,171 DEBUG [RS:1;9a1fddc00362:46235 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:13,171 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:13,171 DEBUG [RS:0;9a1fddc00362:37975 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 
2024-11-11T15:49:13,172 DEBUG [RS:2;9a1fddc00362:35713 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:13,177 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,37975,1731340149555-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,46235,1731340149731-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T15:49:13,177 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,35713,1731340149829-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
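The ChoreService lines above enable periodic chores (CompactionChecker, MemstoreFlusherChore, nonceCleaner, BrokenStoreFileCleaner, MobFileCleanerChore) with a name, period, and time unit. A rough sketch of that pattern follows, assuming the ScheduledChore(String, Stoppable, int) constructor and ChoreService.scheduleChore; the chore name and body here are invented purely for illustration and are not part of the captured run:

```java
// Rough sketch of the ScheduledChore/ChoreService pattern behind the
// "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // Period is in milliseconds by default, as in the log's "period=1000, unit=MILLISECONDS".
    service.scheduleChore(new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic work, analogous to CompactionChecker/MemstoreFlusherChore");
      }
    });
  }
}
```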
2024-11-11T15:49:13,199 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T15:49:13,201 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T15:49:13,202 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,37975,1731340149555-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,202 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,46235,1731340149731-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,202 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,202 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,202 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.Replication(171): 9a1fddc00362,37975,1731340149555 started 2024-11-11T15:49:13,202 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.Replication(171): 9a1fddc00362,46235,1731340149731 started 2024-11-11T15:49:13,204 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T15:49:13,204 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,35713,1731340149829-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,205 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,205 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.Replication(171): 9a1fddc00362,35713,1731340149829 started 2024-11-11T15:49:13,225 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T15:49:13,226 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1482): Serving as 9a1fddc00362,37975,1731340149555, RpcServer on 9a1fddc00362/172.17.0.3:37975, sessionid=0x1019805905b0001 2024-11-11T15:49:13,226 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T15:49:13,227 DEBUG [RS:0;9a1fddc00362:37975 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9a1fddc00362,37975,1731340149555 2024-11-11T15:49:13,227 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,37975,1731340149555' 2024-11-11T15:49:13,227 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T15:49:13,231 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T15:49:13,232 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T15:49:13,232 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T15:49:13,232 INFO [RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,233 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:13,233 DEBUG [RS:0;9a1fddc00362:37975 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9a1fddc00362,37975,1731340149555 2024-11-11T15:49:13,233 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(1482): Serving as 9a1fddc00362,46235,1731340149731, RpcServer on 9a1fddc00362/172.17.0.3:46235, sessionid=0x1019805905b0002 2024-11-11T15:49:13,233 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,37975,1731340149555' 2024-11-11T15:49:13,233 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T15:49:13,233 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T15:49:13,233 DEBUG [RS:1;9a1fddc00362:46235 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9a1fddc00362,46235,1731340149731 2024-11-11T15:49:13,233 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(1482): Serving as 9a1fddc00362,35713,1731340149829, RpcServer on 9a1fddc00362/172.17.0.3:35713, sessionid=0x1019805905b0003 2024-11-11T15:49:13,233 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,46235,1731340149731' 2024-11-11T15:49:13,233 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T15:49:13,234 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T15:49:13,234 DEBUG [RS:2;9a1fddc00362:35713 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 
9a1fddc00362,35713,1731340149829 2024-11-11T15:49:13,234 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,35713,1731340149829' 2024-11-11T15:49:13,234 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T15:49:13,238 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T15:49:13,239 DEBUG [RS:0;9a1fddc00362:37975 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T15:49:13,239 INFO [RS:0;9a1fddc00362:37975 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T15:49:13,239 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T15:49:13,239 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T15:49:13,239 INFO [RS:0;9a1fddc00362:37975 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T15:49:13,240 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T15:49:13,240 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T15:49:13,240 DEBUG [RS:2;9a1fddc00362:35713 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9a1fddc00362,35713,1731340149829 2024-11-11T15:49:13,240 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,35713,1731340149829' 2024-11-11T15:49:13,240 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T15:49:13,240 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T15:49:13,241 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T15:49:13,241 DEBUG [RS:1;9a1fddc00362:46235 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9a1fddc00362,46235,1731340149731 2024-11-11T15:49:13,241 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,46235,1731340149731' 2024-11-11T15:49:13,241 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T15:49:13,241 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T15:49:13,242 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T15:49:13,243 DEBUG [RS:1;9a1fddc00362:46235 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T15:49:13,244 INFO [RS:1;9a1fddc00362:46235 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T15:49:13,244 INFO [RS:1;9a1fddc00362:46235 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-11T15:49:13,247 DEBUG [RS:2;9a1fddc00362:35713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T15:49:13,247 INFO [RS:2;9a1fddc00362:35713 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T15:49:13,247 INFO [RS:2;9a1fddc00362:35713 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T15:49:13,271 WARN [9a1fddc00362:41091 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T15:49:13,346 INFO [RS:0;9a1fddc00362:37975 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T15:49:13,348 INFO [RS:1;9a1fddc00362:46235 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T15:49:13,348 INFO [RS:2;9a1fddc00362:35713 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T15:49:13,358 INFO [RS:0;9a1fddc00362:37975 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C37975%2C1731340149555, suffix=, logDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,37975,1731340149555, archiveDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/oldWALs, maxLogs=32 2024-11-11T15:49:13,359 INFO [RS:2;9a1fddc00362:35713 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C35713%2C1731340149829, suffix=, logDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,35713,1731340149829, archiveDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/oldWALs, maxLogs=32 2024-11-11T15:49:13,373 INFO [RS:1;9a1fddc00362:46235 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C46235%2C1731340149731, suffix=, logDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,46235,1731340149731, archiveDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/oldWALs, maxLogs=32 2024-11-11T15:49:13,395 DEBUG [RS:2;9a1fddc00362:35713 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,35713,1731340149829/9a1fddc00362%2C35713%2C1731340149829.1731340153370, exclude list is [], retry=0 2024-11-11T15:49:13,397 DEBUG [RS:1;9a1fddc00362:46235 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,46235,1731340149731/9a1fddc00362%2C46235%2C1731340149731.1731340153376, exclude list is [], retry=0 2024-11-11T15:49:13,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36465,DS-6777dac3-fbf5-40c8-bdda-b3e5a7e2085f,DISK] 2024-11-11T15:49:13,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:42479,DS-9d7f9eef-a4d8-47c0-921d-83c97f6fe7a9,DISK] 2024-11-11T15:49:13,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-494bae4f-2893-45b5-a6db-0c68f398cc5e,DISK] 2024-11-11T15:49:13,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36465,DS-6777dac3-fbf5-40c8-bdda-b3e5a7e2085f,DISK] 2024-11-11T15:49:13,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42479,DS-9d7f9eef-a4d8-47c0-921d-83c97f6fe7a9,DISK] 2024-11-11T15:49:13,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-494bae4f-2893-45b5-a6db-0c68f398cc5e,DISK] 2024-11-11T15:49:13,408 DEBUG [RS:0;9a1fddc00362:37975 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,37975,1731340149555/9a1fddc00362%2C37975%2C1731340149555.1731340153370, exclude list is [], retry=0 2024-11-11T15:49:13,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42479,DS-9d7f9eef-a4d8-47c0-921d-83c97f6fe7a9,DISK] 2024-11-11T15:49:13,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36465,DS-6777dac3-fbf5-40c8-bdda-b3e5a7e2085f,DISK] 2024-11-11T15:49:13,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-494bae4f-2893-45b5-a6db-0c68f398cc5e,DISK] 2024-11-11T15:49:13,516 INFO [RS:0;9a1fddc00362:37975 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,37975,1731340149555/9a1fddc00362%2C37975%2C1731340149555.1731340153370 2024-11-11T15:49:13,516 INFO [RS:2;9a1fddc00362:35713 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,35713,1731340149829/9a1fddc00362%2C35713%2C1731340149829.1731340153370 2024-11-11T15:49:13,516 INFO [RS:1;9a1fddc00362:46235 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,46235,1731340149731/9a1fddc00362%2C46235%2C1731340149731.1731340153376 2024-11-11T15:49:13,519 DEBUG [RS:0;9a1fddc00362:37975 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:44127:44127),(127.0.0.1/127.0.0.1:37769:37769),(127.0.0.1/127.0.0.1:41835:41835)] 2024-11-11T15:49:13,519 DEBUG [RS:2;9a1fddc00362:35713 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41835:41835),(127.0.0.1/127.0.0.1:44127:44127),(127.0.0.1/127.0.0.1:37769:37769)] 2024-11-11T15:49:13,520 DEBUG [RS:1;9a1fddc00362:46235 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41835:41835),(127.0.0.1/127.0.0.1:37769:37769),(127.0.0.1/127.0.0.1:44127:44127)] 2024-11-11T15:49:13,775 DEBUG [9a1fddc00362:41091 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-11T15:49:13,787 DEBUG [9a1fddc00362:41091 {}] balancer.BalancerClusterState(204): Hosts are {9a1fddc00362=0} racks are {/default-rack=0} 2024-11-11T15:49:13,796 DEBUG [9a1fddc00362:41091 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T15:49:13,796 DEBUG [9a1fddc00362:41091 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T15:49:13,796 DEBUG [9a1fddc00362:41091 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T15:49:13,796 DEBUG [9a1fddc00362:41091 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T15:49:13,796 DEBUG [9a1fddc00362:41091 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T15:49:13,796 DEBUG [9a1fddc00362:41091 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T15:49:13,796 INFO [9a1fddc00362:41091 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T15:49:13,796 INFO [9a1fddc00362:41091 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T15:49:13,796 INFO [9a1fddc00362:41091 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T15:49:13,796 DEBUG [9a1fddc00362:41091 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T15:49:13,805 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9a1fddc00362,37975,1731340149555 2024-11-11T15:49:13,814 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9a1fddc00362,37975,1731340149555, state=OPENING 2024-11-11T15:49:13,819 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T15:49:13,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:13,822 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:13,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:13,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-11T15:49:13,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:13,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:13,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:13,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:13,827 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T15:49:13,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9a1fddc00362,37975,1731340149555}] 2024-11-11T15:49:14,019 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T15:49:14,022 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53813, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T15:49:14,038 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T15:49:14,038 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T15:49:14,039 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-11T15:49:14,043 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C37975%2C1731340149555.meta, suffix=.meta, logDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,37975,1731340149555, archiveDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/oldWALs, maxLogs=32 2024-11-11T15:49:14,063 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,37975,1731340149555/9a1fddc00362%2C37975%2C1731340149555.meta.1731340154045.meta, exclude list is [], retry=0 2024-11-11T15:49:14,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36465,DS-6777dac3-fbf5-40c8-bdda-b3e5a7e2085f,DISK] 2024-11-11T15:49:14,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, 
datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-494bae4f-2893-45b5-a6db-0c68f398cc5e,DISK] 2024-11-11T15:49:14,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42479,DS-9d7f9eef-a4d8-47c0-921d-83c97f6fe7a9,DISK] 2024-11-11T15:49:14,097 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,37975,1731340149555/9a1fddc00362%2C37975%2C1731340149555.meta.1731340154045.meta 2024-11-11T15:49:14,097 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41835:41835),(127.0.0.1/127.0.0.1:44127:44127),(127.0.0.1/127.0.0.1:37769:37769)] 2024-11-11T15:49:14,098 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T15:49:14,100 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T15:49:14,104 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T15:49:14,110 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-11T15:49:14,116 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T15:49:14,117 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:14,117 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T15:49:14,117 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T15:49:14,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T15:49:14,127 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T15:49:14,127 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:14,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:14,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T15:49:14,134 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T15:49:14,134 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:14,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:14,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T15:49:14,138 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T15:49:14,138 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:14,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:14,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T15:49:14,140 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T15:49:14,140 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:14,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T15:49:14,142 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T15:49:14,143 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740 2024-11-11T15:49:14,151 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740 2024-11-11T15:49:14,155 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T15:49:14,155 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T15:49:14,157 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T15:49:14,163 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T15:49:14,165 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69916374, jitterRate=0.04183515906333923}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T15:49:14,166 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T15:49:14,168 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731340154118Writing region info on filesystem at 1731340154118Initializing all the Stores at 1731340154122 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340154123 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340154124 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340154124Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340154124Cleaning up temporary data from old regions at 1731340154156 (+32 ms)Running coprocessor post-open hooks at 1731340154166 (+10 ms)Region opened successfully at 1731340154168 (+2 ms) 2024-11-11T15:49:14,181 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731340154005 2024-11-11T15:49:14,199 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T15:49:14,201 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T15:49:14,204 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9a1fddc00362,37975,1731340149555 2024-11-11T15:49:14,213 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9a1fddc00362,37975,1731340149555, state=OPEN 2024-11-11T15:49:14,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T15:49:14,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T15:49:14,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T15:49:14,225 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:14,225 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:14,225 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:14,225 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9a1fddc00362,37975,1731340149555 2024-11-11T15:49:14,228 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T15:49:14,228 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:14,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T15:49:14,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9a1fddc00362,37975,1731340149555 in 396 msec 2024-11-11T15:49:14,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T15:49:14,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.2220 sec 2024-11-11T15:49:14,256 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T15:49:14,257 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T15:49:14,289 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T15:49:14,291 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9a1fddc00362,37975,1731340149555, seqNum=-1] 2024-11-11T15:49:14,323 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T15:49:14,329 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57075, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T15:49:14,405 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.9810 sec 2024-11-11T15:49:14,405 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731340154405, completionTime=-1 2024-11-11T15:49:14,414 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-11T15:49:14,414 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-11T15:49:14,460 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-11T15:49:14,460 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731340214460 2024-11-11T15:49:14,460 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731340274460 2024-11-11T15:49:14,461 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 46 msec 2024-11-11T15:49:14,463 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-11T15:49:14,474 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,41091,1731340148573-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:14,474 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,41091,1731340148573-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:14,475 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,41091,1731340148573-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:14,477 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9a1fddc00362:41091, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:14,484 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:14,489 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:14,490 DEBUG [master/9a1fddc00362:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T15:49:14,524 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 4.544sec 2024-11-11T15:49:14,529 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T15:49:14,530 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T15:49:14,531 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T15:49:14,532 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-11T15:49:14,532 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T15:49:14,533 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,41091,1731340148573-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T15:49:14,533 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,41091,1731340148573-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T15:49:14,541 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T15:49:14,542 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T15:49:14,543 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,41091,1731340148573-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:14,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5593da80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T15:49:14,631 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-11T15:49:14,631 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-11T15:49:14,636 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9a1fddc00362,41091,-1 for getting cluster id 2024-11-11T15:49:14,640 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T15:49:14,655 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f3a4f029-de6a-4cce-b975-9a08c2c3053f' 2024-11-11T15:49:14,658 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T15:49:14,659 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f3a4f029-de6a-4cce-b975-9a08c2c3053f" 2024-11-11T15:49:14,659 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6faa619d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T15:49:14,659 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9a1fddc00362,41091,-1] 2024-11-11T15:49:14,663 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T15:49:14,666 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:14,668 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43738, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-11-11T15:49:14,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c113695, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T15:49:14,673 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T15:49:14,682 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9a1fddc00362,37975,1731340149555, seqNum=-1] 2024-11-11T15:49:14,683 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T15:49:14,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56240, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T15:49:14,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9a1fddc00362,41091,1731340148573 2024-11-11T15:49:14,746 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T15:49:14,758 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 9a1fddc00362,41091,1731340148573 2024-11-11T15:49:14,762 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3f5aa523 2024-11-11T15:49:14,763 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T15:49:14,766 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43748, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T15:49:14,779 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T15:49:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-11T15:49:14,797 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T15:49:14,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-11T15:49:14,801 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
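Editor's note: the create request above spells out the whole schema — table 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single family 'cf', everything else left at defaults. As a hedged sketch only (the test's own helper methods and connection handling are not visible in this log), an equivalent request could be issued through the HBase Admin API roughly as follows:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at the (test) cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Single column family 'cf'; most attributes printed in the log
          // (VERSIONS => '1', BLOOMFILTER, BLOCKSIZE, ...) are builder defaults here.
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          admin.createTable(desc);
        }
      }
    }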
2024-11-11T15:49:14,807 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T15:49:14,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:14,822 WARN [PEWorker-4 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:14,822 WARN [PEWorker-4 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:14,847 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:51828 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:39241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51828 dst: /127.0.0.1:39241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:14,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-11T15:49:14,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:15,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:15,270 WARN [PEWorker-4 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
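Editor's note: the WARN/ERROR burst above comes from HDFS erasure coding. The RS-3-2-1024k policy needs three data blocks plus two parity blocks, i.e. at least five datanodes, but this mini-cluster runs only three, so the two parity blocks cannot be placed; the "Block group <1> failed to write 2 blocks" message is the same condition. The log itself recommends running 'hdfs ec -verifyClusterSetup'. As a hedged sketch only (the NameNode address and path are copied from this log, not from the test source), the effective policy on the test directory can also be inspected programmatically:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the log (hdfs://localhost:36701); adjust as needed.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36701"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path testDir = new Path("/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04");
          // Returns the EC policy if the directory is erasure coded, or null for plain replication.
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(testDir);
          System.out.println("EC policy on " + testDir + ": "
              + (policy == null ? "replication" : policy.getName()));
          // Unsetting the policy would avoid the parity-block warnings on a 3-datanode cluster:
          // dfs.unsetErasureCodingPolicy(testDir);
        }
      }
    }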
2024-11-11T15:49:15,274 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4791c5da1d6bf471d21f8b51ad0ce67a, NAME => 'TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04 2024-11-11T15:49:15,282 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:15,283 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:15,288 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:51846 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:39241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51846 dst: /127.0.0.1:39241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:15,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-11T15:49:15,300 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T15:49:15,300 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:15,301 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 4791c5da1d6bf471d21f8b51ad0ce67a, disabling compactions & flushes 2024-11-11T15:49:15,301 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:15,301 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:15,301 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. after waiting 0 ms 2024-11-11T15:49:15,301 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:15,301 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:15,301 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4791c5da1d6bf471d21f8b51ad0ce67a: Waiting for close lock at 1731340155301Disabling compacts and flushes for region at 1731340155301Disabling writes for close at 1731340155301Writing region close event to WAL at 1731340155301Closed at 1731340155301 2024-11-11T15:49:15,306 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T15:49:15,313 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731340155306"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731340155306"}]},"ts":"1731340155306"} 2024-11-11T15:49:15,328 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-11T15:49:15,332 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T15:49:15,337 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731340155333"}]},"ts":"1731340155333"} 2024-11-11T15:49:15,347 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-11T15:49:15,348 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {9a1fddc00362=0} racks are {/default-rack=0} 2024-11-11T15:49:15,353 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T15:49:15,353 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T15:49:15,353 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T15:49:15,353 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T15:49:15,353 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T15:49:15,353 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T15:49:15,353 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T15:49:15,353 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T15:49:15,353 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T15:49:15,353 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T15:49:15,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4791c5da1d6bf471d21f8b51ad0ce67a, ASSIGN}] 2024-11-11T15:49:15,362 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4791c5da1d6bf471d21f8b51ad0ce67a, ASSIGN 2024-11-11T15:49:15,366 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4791c5da1d6bf471d21f8b51ad0ce67a, ASSIGN; state=OFFLINE, location=9a1fddc00362,37975,1731340149555; forceNewPlan=false, retain=false 2024-11-11T15:49:15,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775693_1015 (size=32) 2024-11-11T15:49:15,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775692_1015 (size=32) 2024-11-11T15:49:15,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:15,520 INFO [9a1fddc00362:41091 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-11T15:49:15,521 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4791c5da1d6bf471d21f8b51ad0ce67a, regionState=OPENING, regionLocation=9a1fddc00362,37975,1731340149555 2024-11-11T15:49:15,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4791c5da1d6bf471d21f8b51ad0ce67a, ASSIGN because future has completed 2024-11-11T15:49:15,532 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4791c5da1d6bf471d21f8b51ad0ce67a, server=9a1fddc00362,37975,1731340149555}] 2024-11-11T15:49:15,704 INFO [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:15,705 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4791c5da1d6bf471d21f8b51ad0ce67a, NAME => 'TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a.', STARTKEY => '', ENDKEY => ''} 2024-11-11T15:49:15,705 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,705 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:15,706 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,706 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,716 INFO [StoreOpener-4791c5da1d6bf471d21f8b51ad0ce67a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,728 INFO [StoreOpener-4791c5da1d6bf471d21f8b51ad0ce67a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4791c5da1d6bf471d21f8b51ad0ce67a columnFamilyName cf 2024-11-11T15:49:15,728 DEBUG 
[StoreOpener-4791c5da1d6bf471d21f8b51ad0ce67a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:15,730 INFO [StoreOpener-4791c5da1d6bf471d21f8b51ad0ce67a-1 {}] regionserver.HStore(327): Store=4791c5da1d6bf471d21f8b51ad0ce67a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T15:49:15,732 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,736 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/default/TestHBaseWalOnEC/4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,738 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/default/TestHBaseWalOnEC/4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,739 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,739 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,744 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-11T15:49:15,798 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/default/TestHBaseWalOnEC/4791c5da1d6bf471d21f8b51ad0ce67a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T15:49:15,800 INFO [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4791c5da1d6bf471d21f8b51ad0ce67a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61917019, jitterRate=-0.07736451923847198}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T15:49:15,800 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:15,802 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4791c5da1d6bf471d21f8b51ad0ce67a: Running coprocessor pre-open hook at 1731340155706Writing region info on filesystem at 1731340155706Initializing all the Stores at 1731340155714 (+8 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340155714Cleaning up temporary data from old regions at 1731340155739 (+25 ms)Running coprocessor post-open hooks at 1731340155800 (+61 ms)Region opened successfully at 1731340155801 (+1 ms) 2024-11-11T15:49:15,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-11T15:49:15,804 INFO [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a., pid=6, masterSystemTime=1731340155688 2024-11-11T15:49:15,809 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:15,809 INFO [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:15,811 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4791c5da1d6bf471d21f8b51ad0ce67a, regionState=OPEN, openSeqNum=2, regionLocation=9a1fddc00362,37975,1731340149555 2024-11-11T15:49:15,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4791c5da1d6bf471d21f8b51ad0ce67a, server=9a1fddc00362,37975,1731340149555 because future has completed 2024-11-11T15:49:15,840 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T15:49:15,840 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4791c5da1d6bf471d21f8b51ad0ce67a, server=9a1fddc00362,37975,1731340149555 in 301 msec 2024-11-11T15:49:15,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T15:49:15,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4791c5da1d6bf471d21f8b51ad0ce67a, ASSIGN in 485 msec 2024-11-11T15:49:15,848 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T15:49:15,848 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731340155848"}]},"ts":"1731340155848"} 2024-11-11T15:49:15,851 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-11T15:49:15,853 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 
2024-11-11T15:49:15,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 1.0710 sec 2024-11-11T15:49:15,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:15,958 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-11T15:49:15,958 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-11T15:49:15,960 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T15:49:15,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-11T15:49:15,967 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T15:49:15,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-11T15:49:15,977 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a., hostname=9a1fddc00362,37975,1731340149555, seqNum=2] 2024-11-11T15:49:15,987 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-11T15:49:15,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-11T15:49:15,995 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-11T15:49:15,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T15:49:15,997 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T15:49:15,999 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T15:49:16,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T15:49:16,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37975 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-11T15:49:16,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:16,169 INFO [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4791c5da1d6bf471d21f8b51ad0ce67a 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-11T15:49:16,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/default/TestHBaseWalOnEC/4791c5da1d6bf471d21f8b51ad0ce67a/.tmp/cf/0d84375ddbf54fca9806a1890b12001f is 36, key is row/cf:cq/1731340155979/Put/seqid=0 2024-11-11T15:49:16,285 WARN [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:16,286 WARN [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:16,318 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675335237_22 at /127.0.0.1:59156 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59156 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T15:49:16,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T15:49:16,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-11T15:49:16,334 WARN [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T15:49:16,334 INFO [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/default/TestHBaseWalOnEC/4791c5da1d6bf471d21f8b51ad0ce67a/.tmp/cf/0d84375ddbf54fca9806a1890b12001f 2024-11-11T15:49:16,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/default/TestHBaseWalOnEC/4791c5da1d6bf471d21f8b51ad0ce67a/.tmp/cf/0d84375ddbf54fca9806a1890b12001f as hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/default/TestHBaseWalOnEC/4791c5da1d6bf471d21f8b51ad0ce67a/cf/0d84375ddbf54fca9806a1890b12001f 2024-11-11T15:49:16,425 INFO [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/default/TestHBaseWalOnEC/4791c5da1d6bf471d21f8b51ad0ce67a/cf/0d84375ddbf54fca9806a1890b12001f, entries=1, sequenceid=5, filesize=4.7 K 2024-11-11T15:49:16,433 INFO [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 4791c5da1d6bf471d21f8b51ad0ce67a in 263ms, sequenceid=5, compaction requested=false 2024-11-11T15:49:16,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-11T15:49:16,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4791c5da1d6bf471d21f8b51ad0ce67a: 2024-11-11T15:49:16,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 
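Editor's note: the flush procedure above (pid=7 with sub-procedure pid=8) was requested by the client ("Client=jenkins ... flush TestHBaseWalOnEC") and produced a single ~4.7 K HFile for the one cell the test wrote. As a hedged sketch only (the test's table/connection handles and the cell value are assumptions; the log shows only the key row/cf:cq and a 32-byte data size), the corresponding client-side calls look roughly like:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Hedged sketch: write one cell and flush it, mirroring what this log records. */
    final class PutAndFlushSketch {
      static void putAndFlush(Connection conn) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
          // Key matches the one printed by HFileWriterImpl: row/cf:cq; the value is assumed.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Asks the master to run the FlushTableProcedure seen as pid=7 above.
          admin.flush(name);
        }
      }
    }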
2024-11-11T15:49:16,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-11T15:49:16,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-11T15:49:16,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-11T15:49:16,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 449 msec 2024-11-11T15:49:16,458 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 467 msec 2024-11-11T15:49:16,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41091 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T15:49:16,629 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-11T15:49:16,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T15:49:16,652 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T15:49:16,652 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:16,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:16,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:16,660 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T15:49:16,661 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T15:49:16,661 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1744643541, stopped=false 2024-11-11T15:49:16,661 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9a1fddc00362,41091,1731340148573 2024-11-11T15:49:16,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:16,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:16,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:16,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:16,664 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T15:49:16,664 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:16,664 DEBUG 
[pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:16,664 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T15:49:16,664 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:16,664 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:16,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:16,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:16,666 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9a1fddc00362,37975,1731340149555' ***** 2024-11-11T15:49:16,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T15:49:16,666 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:16,666 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T15:49:16,666 INFO [RS:0;9a1fddc00362:37975 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T15:49:16,667 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:16,667 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T15:49:16,667 INFO [RS:0;9a1fddc00362:37975 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
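Editor's note: the shutdown sequence that follows (closing the async connections, stopping the three region servers and the master, deleting the /hbase/running znode) is driven by HBaseTestingUtil.shutdownMiniCluster(), which the call stacks above show being invoked from TestHBaseWalOnEC.tearDown (TestHBaseWalOnEC.java:101). A minimal sketch of such a JUnit teardown, with the utility field name and lifecycle annotation assumed:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      // Field name UTIL is an assumption; the log only shows the shutdownMiniCluster() call site.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Stops the mini HBase cluster and its backing mini DFS/ZooKeeper, as seen in this log.
        UTIL.shutdownMiniCluster();
      }
    }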
2024-11-11T15:49:16,667 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(3091): Received CLOSE for 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:16,668 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9a1fddc00362,46235,1731340149731' ***** 2024-11-11T15:49:16,668 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T15:49:16,668 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9a1fddc00362,35713,1731340149829' ***** 2024-11-11T15:49:16,668 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T15:49:16,668 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:16,668 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T15:49:16,668 INFO [RS:2;9a1fddc00362:35713 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T15:49:16,668 INFO [RS:2;9a1fddc00362:35713 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T15:49:16,668 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(959): stopping server 9a1fddc00362,35713,1731340149829 2024-11-11T15:49:16,668 INFO [RS:2;9a1fddc00362:35713 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T15:49:16,669 INFO [RS:2;9a1fddc00362:35713 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;9a1fddc00362:35713. 2024-11-11T15:49:16,669 DEBUG [RS:2;9a1fddc00362:35713 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:16,669 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T15:49:16,669 DEBUG [RS:2;9a1fddc00362:35713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:16,669 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Set watcher on znode that does not yet 
exist, /hbase/running 2024-11-11T15:49:16,669 INFO [RS:1;9a1fddc00362:46235 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T15:49:16,669 INFO [RS:1;9a1fddc00362:46235 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T15:49:16,669 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(959): stopping server 9a1fddc00362,46235,1731340149731 2024-11-11T15:49:16,669 INFO [RS:1;9a1fddc00362:46235 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T15:49:16,669 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(976): stopping server 9a1fddc00362,35713,1731340149829; all regions closed. 2024-11-11T15:49:16,669 INFO [RS:1;9a1fddc00362:46235 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;9a1fddc00362:46235. 2024-11-11T15:49:16,669 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(959): stopping server 9a1fddc00362,37975,1731340149555 2024-11-11T15:49:16,669 INFO [RS:0;9a1fddc00362:37975 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T15:49:16,670 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T15:49:16,670 INFO [RS:0;9a1fddc00362:37975 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9a1fddc00362:37975. 2024-11-11T15:49:16,670 DEBUG [RS:0;9a1fddc00362:37975 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:16,670 DEBUG [RS:0;9a1fddc00362:37975 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:16,670 DEBUG [RS:1;9a1fddc00362:46235 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at 
org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:16,670 DEBUG [RS:1;9a1fddc00362:46235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:16,670 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T15:49:16,670 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T15:49:16,670 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(976): stopping server 9a1fddc00362,46235,1731340149731; all regions closed. 2024-11-11T15:49:16,670 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T15:49:16,670 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T15:49:16,671 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T15:49:16,672 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T15:49:16,673 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1325): Online Regions={4791c5da1d6bf471d21f8b51ad0ce67a=TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T15:49:16,673 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4791c5da1d6bf471d21f8b51ad0ce67a 2024-11-11T15:49:16,673 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4791c5da1d6bf471d21f8b51ad0ce67a, disabling compactions & flushes 2024-11-11T15:49:16,675 INFO [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:16,675 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:16,675 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. after waiting 0 ms 2024-11-11T15:49:16,675 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 
2024-11-11T15:49:16,676 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T15:49:16,676 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T15:49:16,676 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T15:49:16,676 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T15:49:16,676 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T15:49:16,677 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-11T15:49:16,686 INFO [regionserver/9a1fddc00362:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:16,689 INFO [regionserver/9a1fddc00362:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:16,691 INFO [regionserver/9a1fddc00362:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:16,702 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,35713,1731340149829/9a1fddc00362%2C35713%2C1731340149829.1731340153370 not finished, retry = 0 2024-11-11T15:49:16,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741826_1016 (size=93) 2024-11-11T15:49:16,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741826_1016 (size=93) 2024-11-11T15:49:16,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_1073741826_1016 (size=93) 2024-11-11T15:49:16,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741827_1017 (size=93) 2024-11-11T15:49:16,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_1073741827_1017 (size=93) 2024-11-11T15:49:16,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741827_1017 (size=93) 2024-11-11T15:49:16,728 DEBUG [RS:1;9a1fddc00362:46235 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/oldWALs 2024-11-11T15:49:16,728 INFO [RS:1;9a1fddc00362:46235 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9a1fddc00362%2C46235%2C1731340149731:(num 1731340153376) 2024-11-11T15:49:16,729 DEBUG [RS:1;9a1fddc00362:46235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:16,729 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:16,729 INFO [RS:1;9a1fddc00362:46235 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T15:49:16,729 INFO 
[RS:1;9a1fddc00362:46235 {}] hbase.ChoreService(370): Chore service for: regionserver/9a1fddc00362:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T15:49:16,730 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T15:49:16,730 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T15:49:16,730 INFO [regionserver/9a1fddc00362:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T15:49:16,730 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T15:49:16,730 INFO [RS:1;9a1fddc00362:46235 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T15:49:16,730 INFO [RS:1;9a1fddc00362:46235 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46235 2024-11-11T15:49:16,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9a1fddc00362,46235,1731340149731 2024-11-11T15:49:16,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T15:49:16,740 INFO [RS:1;9a1fddc00362:46235 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T15:49:16,753 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9a1fddc00362,46235,1731340149731] 2024-11-11T15:49:16,757 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9a1fddc00362,46235,1731340149731 already deleted, retry=false 2024-11-11T15:49:16,757 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9a1fddc00362,46235,1731340149731 expired; onlineServers=2 2024-11-11T15:49:16,768 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/default/TestHBaseWalOnEC/4791c5da1d6bf471d21f8b51ad0ce67a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-11T15:49:16,769 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/.tmp/info/f1ef33bc296c4d689daff2bb385eb9c3 is 153, key is TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a./info:regioninfo/1731340155811/Put/seqid=0 2024-11-11T15:49:16,772 INFO [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 
2024-11-11T15:49:16,773 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4791c5da1d6bf471d21f8b51ad0ce67a: Waiting for close lock at 1731340156673Running coprocessor pre-close hooks at 1731340156673Disabling compacts and flushes for region at 1731340156673Disabling writes for close at 1731340156675 (+2 ms)Writing region close event to WAL at 1731340156716 (+41 ms)Running coprocessor post-close hooks at 1731340156770 (+54 ms)Closed at 1731340156772 (+2 ms) 2024-11-11T15:49:16,773 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731340154767.4791c5da1d6bf471d21f8b51ad0ce67a. 2024-11-11T15:49:16,774 WARN [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:16,775 WARN [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:16,801 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675335237_22 at /127.0.0.1:51892 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51892 dst: /127.0.0.1:39241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T15:49:16,829 DEBUG [RS:2;9a1fddc00362:35713 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/oldWALs 2024-11-11T15:49:16,829 INFO [RS:2;9a1fddc00362:35713 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9a1fddc00362%2C35713%2C1731340149829:(num 1731340153370) 2024-11-11T15:49:16,829 DEBUG [RS:2;9a1fddc00362:35713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:16,829 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:16,830 INFO [RS:2;9a1fddc00362:35713 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T15:49:16,832 INFO [RS:2;9a1fddc00362:35713 {}] hbase.ChoreService(370): Chore service for: regionserver/9a1fddc00362:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T15:49:16,832 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T15:49:16,832 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T15:49:16,833 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T15:49:16,833 INFO [RS:2;9a1fddc00362:35713 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T15:49:16,833 INFO [RS:2;9a1fddc00362:35713 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35713 2024-11-11T15:49:16,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-11T15:49:16,836 INFO [regionserver/9a1fddc00362:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T15:49:16,837 INFO [RS:2;9a1fddc00362:35713 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T15:49:16,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9a1fddc00362,35713,1731340149829 2024-11-11T15:49:16,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T15:49:16,843 WARN [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T15:49:16,844 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/.tmp/info/f1ef33bc296c4d689daff2bb385eb9c3 2024-11-11T15:49:16,844 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9a1fddc00362,35713,1731340149829] 2024-11-11T15:49:16,849 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9a1fddc00362,35713,1731340149829 already deleted, retry=false 2024-11-11T15:49:16,849 INFO [RS:1;9a1fddc00362:46235 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T15:49:16,849 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9a1fddc00362,35713,1731340149829 expired; onlineServers=1 2024-11-11T15:49:16,850 INFO [RS:1;9a1fddc00362:46235 {}] regionserver.HRegionServer(1031): Exiting; stopping=9a1fddc00362,46235,1731340149731; zookeeper connection closed. 2024-11-11T15:49:16,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:16,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46235-0x1019805905b0002, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:16,853 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@15317db5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@15317db5 2024-11-11T15:49:16,874 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T15:49:16,920 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/.tmp/ns/99983f3965c847bca1f17a781b85c0ed is 43, key is default/ns:d/1731340154336/Put/seqid=0 2024-11-11T15:49:16,923 WARN [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:16,924 WARN [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T15:49:16,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675335237_22 at /127.0.0.1:45340 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45340 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:16,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-11T15:49:16,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:16,941 INFO [RS:2;9a1fddc00362:35713 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T15:49:16,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35713-0x1019805905b0003, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:16,941 INFO [RS:2;9a1fddc00362:35713 {}] regionserver.HRegionServer(1031): Exiting; stopping=9a1fddc00362,35713,1731340149829; zookeeper connection closed. 2024-11-11T15:49:16,942 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7030ec23 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7030ec23 2024-11-11T15:49:17,074 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T15:49:17,177 INFO [regionserver/9a1fddc00362:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T15:49:17,177 INFO [regionserver/9a1fddc00362:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T15:49:17,275 DEBUG [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T15:49:17,339 WARN [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T15:49:17,340 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/.tmp/ns/99983f3965c847bca1f17a781b85c0ed 2024-11-11T15:49:17,378 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/.tmp/table/19aa560813c6490d94815bcfdc9dc0b9 is 52, key is TestHBaseWalOnEC/table:state/1731340155848/Put/seqid=0 2024-11-11T15:49:17,381 WARN [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:17,381 WARN [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:17,393 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675335237_22 at /127.0.0.1:51910 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:39241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51910 dst: /127.0.0.1:39241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:17,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-11T15:49:17,400 WARN [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-11-11T15:49:17,400 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/.tmp/table/19aa560813c6490d94815bcfdc9dc0b9 2024-11-11T15:49:17,414 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/.tmp/info/f1ef33bc296c4d689daff2bb385eb9c3 as hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/info/f1ef33bc296c4d689daff2bb385eb9c3 2024-11-11T15:49:17,427 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/info/f1ef33bc296c4d689daff2bb385eb9c3, entries=10, sequenceid=11, filesize=6.5 K 2024-11-11T15:49:17,430 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/.tmp/ns/99983f3965c847bca1f17a781b85c0ed as hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/ns/99983f3965c847bca1f17a781b85c0ed 2024-11-11T15:49:17,441 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/ns/99983f3965c847bca1f17a781b85c0ed, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T15:49:17,443 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/.tmp/table/19aa560813c6490d94815bcfdc9dc0b9 as hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/table/19aa560813c6490d94815bcfdc9dc0b9 2024-11-11T15:49:17,454 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/table/19aa560813c6490d94815bcfdc9dc0b9, entries=2, sequenceid=11, filesize=5.1 K 2024-11-11T15:49:17,457 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 780ms, sequenceid=11, compaction requested=false 2024-11-11T15:49:17,457 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T15:49:17,469 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T15:49:17,470 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T15:49:17,470 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T15:49:17,470 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731340156676Running coprocessor pre-close hooks at 1731340156676Disabling compacts and flushes for region at 1731340156676Disabling writes for close at 1731340156676Obtaining lock to block concurrent updates at 1731340156677 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731340156677Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731340156677Flushing stores of hbase:meta,,1.1588230740 at 1731340156680 (+3 ms)Flushing 1588230740/info: creating writer at 1731340156680Flushing 1588230740/info: appending metadata at 1731340156762 (+82 ms)Flushing 1588230740/info: closing flushed file at 1731340156763 (+1 ms)Flushing 1588230740/ns: creating writer at 1731340156887 (+124 ms)Flushing 1588230740/ns: appending metadata at 1731340156919 (+32 ms)Flushing 1588230740/ns: closing flushed file at 1731340156919Flushing 1588230740/table: creating writer at 1731340157353 (+434 ms)Flushing 1588230740/table: appending metadata at 1731340157376 (+23 ms)Flushing 1588230740/table: closing flushed file at 1731340157377 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20692c85: reopening flushed file at 1731340157413 (+36 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57c9145c: reopening flushed file at 1731340157428 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4dc32422: reopening flushed file at 1731340157441 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 780ms, sequenceid=11, compaction requested=false at 1731340157457 (+16 ms)Writing region close event to WAL at 1731340157460 (+3 ms)Running coprocessor post-close hooks at 1731340157470 (+10 ms)Closed at 1731340157470 2024-11-11T15:49:17,470 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T15:49:17,475 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(976): stopping server 9a1fddc00362,37975,1731340149555; all regions closed. 
2024-11-11T15:49:17,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741829_1019 (size=2751) 2024-11-11T15:49:17,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741829_1019 (size=2751) 2024-11-11T15:49:17,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_1073741829_1019 (size=2751) 2024-11-11T15:49:17,485 DEBUG [RS:0;9a1fddc00362:37975 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/oldWALs 2024-11-11T15:49:17,485 INFO [RS:0;9a1fddc00362:37975 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9a1fddc00362%2C37975%2C1731340149555.meta:.meta(num 1731340154045) 2024-11-11T15:49:17,492 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/WALs/9a1fddc00362,37975,1731340149555/9a1fddc00362%2C37975%2C1731340149555.1731340153370 not finished, retry = 0 2024-11-11T15:49:17,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741828_1018 (size=1298) 2024-11-11T15:49:17,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_1073741828_1018 (size=1298) 2024-11-11T15:49:17,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741828_1018 (size=1298) 2024-11-11T15:49:17,597 DEBUG [RS:0;9a1fddc00362:37975 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/oldWALs 2024-11-11T15:49:17,597 INFO [RS:0;9a1fddc00362:37975 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9a1fddc00362%2C37975%2C1731340149555:(num 1731340153370) 2024-11-11T15:49:17,597 DEBUG [RS:0;9a1fddc00362:37975 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:17,597 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:17,597 INFO [RS:0;9a1fddc00362:37975 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T15:49:17,597 INFO [RS:0;9a1fddc00362:37975 {}] hbase.ChoreService(370): Chore service for: regionserver/9a1fddc00362:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T15:49:17,598 INFO [RS:0;9a1fddc00362:37975 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T15:49:17,598 INFO [regionserver/9a1fddc00362:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T15:49:17,598 INFO [RS:0;9a1fddc00362:37975 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37975 2024-11-11T15:49:17,600 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9a1fddc00362,37975,1731340149555 2024-11-11T15:49:17,600 INFO [RS:0;9a1fddc00362:37975 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T15:49:17,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T15:49:17,602 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9a1fddc00362,37975,1731340149555] 2024-11-11T15:49:17,603 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9a1fddc00362,37975,1731340149555 already deleted, retry=false 2024-11-11T15:49:17,603 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9a1fddc00362,37975,1731340149555 expired; onlineServers=0 2024-11-11T15:49:17,603 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9a1fddc00362,41091,1731340148573' ***** 2024-11-11T15:49:17,603 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T15:49:17,604 INFO [M:0;9a1fddc00362:41091 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T15:49:17,604 INFO [M:0;9a1fddc00362:41091 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T15:49:17,604 DEBUG [M:0;9a1fddc00362:41091 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T15:49:17,604 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T15:49:17,604 DEBUG [M:0;9a1fddc00362:41091 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T15:49:17,604 DEBUG [master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.large.0-1731340152749 {}] cleaner.HFileCleaner(306): Exit Thread[master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.large.0-1731340152749,5,FailOnTimeoutGroup] 2024-11-11T15:49:17,604 DEBUG [master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.small.0-1731340152751 {}] cleaner.HFileCleaner(306): Exit Thread[master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.small.0-1731340152751,5,FailOnTimeoutGroup] 2024-11-11T15:49:17,605 INFO [M:0;9a1fddc00362:41091 {}] hbase.ChoreService(370): Chore service for: master/9a1fddc00362:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T15:49:17,605 INFO [M:0;9a1fddc00362:41091 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T15:49:17,605 DEBUG [M:0;9a1fddc00362:41091 {}] master.HMaster(1795): Stopping service threads 2024-11-11T15:49:17,605 INFO [M:0;9a1fddc00362:41091 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T15:49:17,605 INFO [M:0;9a1fddc00362:41091 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T15:49:17,606 INFO [M:0;9a1fddc00362:41091 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T15:49:17,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T15:49:17,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:17,606 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T15:49:17,607 DEBUG [M:0;9a1fddc00362:41091 {}] zookeeper.ZKUtil(347): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T15:49:17,607 WARN [M:0;9a1fddc00362:41091 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T15:49:17,608 INFO [M:0;9a1fddc00362:41091 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/.lastflushedseqids 2024-11-11T15:49:17,620 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:17,620 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T15:49:17,627 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:45372 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45372 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:17,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-11T15:49:17,634 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T15:49:17,634 INFO [M:0;9a1fddc00362:41091 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T15:49:17,634 INFO [M:0;9a1fddc00362:41091 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T15:49:17,635 DEBUG [M:0;9a1fddc00362:41091 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T15:49:17,635 INFO [M:0;9a1fddc00362:41091 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:17,635 DEBUG [M:0;9a1fddc00362:41091 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:17,635 DEBUG [M:0;9a1fddc00362:41091 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T15:49:17,635 DEBUG [M:0;9a1fddc00362:41091 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T15:49:17,635 INFO [M:0;9a1fddc00362:41091 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-11-11T15:49:17,662 DEBUG [M:0;9a1fddc00362:41091 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bc7eeddeab8a45c08e7cefe0ee4abb8e is 82, key is hbase:meta,,1/info:regioninfo/1731340154203/Put/seqid=0 2024-11-11T15:49:17,665 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:17,666 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:17,673 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:45396 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45396 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:17,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-11T15:49:17,683 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T15:49:17,683 INFO [M:0;9a1fddc00362:41091 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bc7eeddeab8a45c08e7cefe0ee4abb8e 2024-11-11T15:49:17,702 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:17,702 INFO [RS:0;9a1fddc00362:37975 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T15:49:17,702 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37975-0x1019805905b0001, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:17,702 INFO [RS:0;9a1fddc00362:37975 {}] regionserver.HRegionServer(1031): Exiting; stopping=9a1fddc00362,37975,1731340149555; zookeeper connection closed. 2024-11-11T15:49:17,707 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2bb06ece {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2bb06ece 2024-11-11T15:49:17,707 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-11T15:49:17,730 DEBUG [M:0;9a1fddc00362:41091 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/852cc464aab147fea018af55aa90091b is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731340155856/Put/seqid=0 2024-11-11T15:49:17,735 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:17,735 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:17,745 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:45426 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45426 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:17,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775552_1037 (size=6439) 2024-11-11T15:49:18,157 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T15:49:18,158 INFO [M:0;9a1fddc00362:41091 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/852cc464aab147fea018af55aa90091b 2024-11-11T15:49:18,200 DEBUG [M:0;9a1fddc00362:41091 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0894cf556a96445da792a41e16b4e987 is 69, key is 9a1fddc00362,35713,1731340149829/rs:state/1731340152832/Put/seqid=0 2024-11-11T15:49:18,202 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:18,202 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T15:49:18,210 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-688913992_22 at /127.0.0.1:45440 [Receiving block BP-1086051422-172.17.0.3-1731340143188:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45440 dst: /127.0.0.1:36465 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T15:49:18,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-11T15:49:18,226 WARN [M:0;9a1fddc00362:41091 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T15:49:18,226 INFO [M:0;9a1fddc00362:41091 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0894cf556a96445da792a41e16b4e987 2024-11-11T15:49:18,245 DEBUG [M:0;9a1fddc00362:41091 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bc7eeddeab8a45c08e7cefe0ee4abb8e as hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bc7eeddeab8a45c08e7cefe0ee4abb8e 2024-11-11T15:49:18,259 INFO [M:0;9a1fddc00362:41091 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bc7eeddeab8a45c08e7cefe0ee4abb8e, entries=8, sequenceid=72, filesize=5.5 K 2024-11-11T15:49:18,262 DEBUG [M:0;9a1fddc00362:41091 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/852cc464aab147fea018af55aa90091b as hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/852cc464aab147fea018af55aa90091b 2024-11-11T15:49:18,283 INFO [M:0;9a1fddc00362:41091 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/852cc464aab147fea018af55aa90091b, entries=8, sequenceid=72, filesize=6.3 K 2024-11-11T15:49:18,289 DEBUG [M:0;9a1fddc00362:41091 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0894cf556a96445da792a41e16b4e987 as hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0894cf556a96445da792a41e16b4e987 2024-11-11T15:49:18,309 INFO [M:0;9a1fddc00362:41091 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0894cf556a96445da792a41e16b4e987, entries=3, sequenceid=72, filesize=5.2 K 2024-11-11T15:49:18,314 INFO [M:0;9a1fddc00362:41091 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 678ms, sequenceid=72, compaction requested=false 2024-11-11T15:49:18,315 INFO [M:0;9a1fddc00362:41091 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:18,316 DEBUG [M:0;9a1fddc00362:41091 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731340157635Disabling compacts and flushes for region at 1731340157635Disabling writes for close at 1731340157635Obtaining lock to block concurrent updates at 1731340157635Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731340157635Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1731340157636 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731340157637 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731340157638 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731340157662 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731340157662Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731340157693 (+31 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731340157729 (+36 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731340157729Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731340158170 (+441 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731340158199 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731340158199Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a129e97: reopening flushed file at 1731340158243 (+44 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3da422c3: reopening flushed file at 1731340158259 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@576831a1: reopening flushed file at 1731340158283 (+24 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 678ms, sequenceid=72, compaction requested=false at 1731340158314 (+31 ms)Writing region close event to WAL at 1731340158315 (+1 ms)Closed at 1731340158315 2024-11-11T15:49:18,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741825_1011 (size=32674) 2024-11-11T15:49:18,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_1073741825_1011 (size=32674) 2024-11-11T15:49:18,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741825_1011 (size=32674) 2024-11-11T15:49:18,331 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T15:49:18,332 INFO [M:0;9a1fddc00362:41091 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T15:49:18,332 INFO [M:0;9a1fddc00362:41091 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41091 2024-11-11T15:49:18,332 INFO [M:0;9a1fddc00362:41091 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T15:49:18,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775645_1025 (size=4787) 2024-11-11T15:49:18,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775612_1029 (size=5153) 2024-11-11T15:49:18,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775644_1025 (size=4787) 2024-11-11T15:49:18,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775661_1023 (size=51) 2024-11-11T15:49:18,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_-9223372036854775613_1029 (size=5153) 2024-11-11T15:49:18,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775660_1023 (size=51) 2024-11-11T15:49:18,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36465 is added to blk_-9223372036854775629_1027 (size=6637) 2024-11-11T15:49:18,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775628_1027 (size=6637) 2024-11-11T15:49:18,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:18,438 INFO [M:0;9a1fddc00362:41091 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T15:49:18,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41091-0x1019805905b0000, quorum=127.0.0.1:49707, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:18,444 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6cced15c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T15:49:18,447 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38003bf3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T15:49:18,447 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T15:49:18,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30f23eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T15:49:18,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@7e3331e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir/,STOPPED} 2024-11-11T15:49:18,454 WARN [BP-1086051422-172.17.0.3-1731340143188 heartbeating to localhost/127.0.0.1:36701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T15:49:18,454 WARN [BP-1086051422-172.17.0.3-1731340143188 heartbeating to localhost/127.0.0.1:36701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086051422-172.17.0.3-1731340143188 (Datanode Uuid a85cba08-0cf2-4670-82cd-3d9b8fc8ddff) service to localhost/127.0.0.1:36701 2024-11-11T15:49:18,456 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T15:49:18,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data5/current/BP-1086051422-172.17.0.3-1731340143188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T15:49:18,456 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T15:49:18,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data6/current/BP-1086051422-172.17.0.3-1731340143188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T15:49:18,457 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T15:49:18,461 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@328e0d16{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T15:49:18,462 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61998c4c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T15:49:18,462 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T15:49:18,462 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a445e53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T15:49:18,462 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@448e9acc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir/,STOPPED} 2024-11-11T15:49:18,471 WARN [BP-1086051422-172.17.0.3-1731340143188 heartbeating to localhost/127.0.0.1:36701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T15:49:18,471 WARN [BP-1086051422-172.17.0.3-1731340143188 heartbeating to localhost/127.0.0.1:36701 {}] datanode.BPServiceActor(925): Ending block pool 
service for: Block pool BP-1086051422-172.17.0.3-1731340143188 (Datanode Uuid cd03cfc1-bded-4164-ba54-85ee07d32e58) service to localhost/127.0.0.1:36701 2024-11-11T15:49:18,471 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T15:49:18,471 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T15:49:18,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data3/current/BP-1086051422-172.17.0.3-1731340143188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T15:49:18,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data4/current/BP-1086051422-172.17.0.3-1731340143188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T15:49:18,472 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T15:49:18,475 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ea802a5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T15:49:18,476 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24c8c1e0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T15:49:18,476 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T15:49:18,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d388052{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T15:49:18,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@716a4960{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir/,STOPPED} 2024-11-11T15:49:18,482 WARN [BP-1086051422-172.17.0.3-1731340143188 heartbeating to localhost/127.0.0.1:36701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T15:49:18,482 WARN [BP-1086051422-172.17.0.3-1731340143188 heartbeating to localhost/127.0.0.1:36701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086051422-172.17.0.3-1731340143188 (Datanode Uuid 8637a192-734f-4ac6-ace3-6fffce437bb3) service to localhost/127.0.0.1:36701 2024-11-11T15:49:18,483 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data1/current/BP-1086051422-172.17.0.3-1731340143188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T15:49:18,483 
WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/cluster_c629b883-0709-dd40-a9de-dfd6efe7058d/data/data2/current/BP-1086051422-172.17.0.3-1731340143188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T15:49:18,484 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T15:49:18,488 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T15:49:18,488 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T15:49:18,505 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ea2dca6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T15:49:18,506 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ed9b238{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T15:49:18,506 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T15:49:18,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@410292bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T15:49:18,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27f57d6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir/,STOPPED} 2024-11-11T15:49:18,522 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T15:49:18,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T15:49:18,579 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=85 (was 155), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1174 (was 1326), ProcessCount=11 (was 11), AvailableMemoryMB=2205 (was 2445) 2024-11-11T15:49:18,589 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=85, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=1174, ProcessCount=11, AvailableMemoryMB=2207 2024-11-11T15:49:18,589 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T15:49:18,590 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.log.dir so I do NOT create it in target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526 2024-11-11T15:49:18,590 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/83d1dd73-8cf4-459d-2eec-27264afc1558/hadoop.tmp.dir so I do NOT create it in target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526 2024-11-11T15:49:18,590 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946, deleteOnExit=true 2024-11-11T15:49:18,590 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T15:49:18,590 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/test.cache.data in system properties and HBase conf 2024-11-11T15:49:18,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T15:49:18,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.log.dir in system properties and HBase conf 2024-11-11T15:49:18,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T15:49:18,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T15:49:18,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T15:49:18,591 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-11T15:49:18,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T15:49:18,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T15:49:18,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T15:49:18,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T15:49:18,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T15:49:18,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T15:49:18,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T15:49:18,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T15:49:18,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T15:49:18,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/nfs.dump.dir in system properties and HBase conf 2024-11-11T15:49:18,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/java.io.tmpdir in system properties and HBase conf 2024-11-11T15:49:18,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T15:49:18,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T15:49:18,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T15:49:18,697 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:18,702 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T15:49:18,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T15:49:18,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T15:49:18,704 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T15:49:18,705 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:18,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19064eca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.log.dir/,AVAILABLE} 2024-11-11T15:49:18,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69d5fb76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T15:49:18,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b0aa43d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/java.io.tmpdir/jetty-localhost-40663-hadoop-hdfs-3_4_1-tests_jar-_-any-12164808638017259053/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T15:49:18,863 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@44402c27{HTTP/1.1, (http/1.1)}{localhost:40663} 2024-11-11T15:49:18,864 INFO [Time-limited test {}] server.Server(415): Started @20119ms 2024-11-11T15:49:19,036 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:19,045 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T15:49:19,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T15:49:19,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T15:49:19,054 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T15:49:19,056 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5810c87d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.log.dir/,AVAILABLE} 2024-11-11T15:49:19,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c8150f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T15:49:19,121 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T15:49:19,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T15:49:19,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T15:49:19,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3907d0a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/java.io.tmpdir/jetty-localhost-44369-hadoop-hdfs-3_4_1-tests_jar-_-any-5525712502953035238/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T15:49:19,232 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@95e5bc7{HTTP/1.1, (http/1.1)}{localhost:44369} 2024-11-11T15:49:19,232 INFO [Time-limited test {}] server.Server(415): Started @20488ms 2024-11-11T15:49:19,234 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T15:49:19,346 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T15:49:19,378 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T15:49:19,447 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T15:49:19,448 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T15:49:19,463 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:19,469 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T15:49:19,472 WARN [Thread-548 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data1/current/BP-1366544186-172.17.0.3-1731340158641/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:19,473 WARN [Thread-549 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data2/current/BP-1366544186-172.17.0.3-1731340158641/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:19,484 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T15:49:19,484 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T15:49:19,485 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T15:49:19,485 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f68c25e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.log.dir/,AVAILABLE} 2024-11-11T15:49:19,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39eb5e5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T15:49:19,542 WARN [Thread-530 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T15:49:19,556 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20c6e1adac7a0139 with lease ID 0x876180991e449c22: Processing first storage report for DS-8d91ff69-5641-4ad6-81c7-d0e130cd85e5 from datanode DatanodeRegistration(127.0.0.1:45875, datanodeUuid=86ad93e0-a3f2-493b-bc93-84c5156c1249, infoPort=37217, infoSecurePort=0, ipcPort=44147, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641) 2024-11-11T15:49:19,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20c6e1adac7a0139 with lease ID 0x876180991e449c22: from storage DS-8d91ff69-5641-4ad6-81c7-d0e130cd85e5 node DatanodeRegistration(127.0.0.1:45875, datanodeUuid=86ad93e0-a3f2-493b-bc93-84c5156c1249, infoPort=37217, infoSecurePort=0, ipcPort=44147, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:19,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20c6e1adac7a0139 with lease ID 0x876180991e449c22: Processing first storage report for DS-f85155dc-0b2b-42f1-9c32-51a9a3324193 from datanode DatanodeRegistration(127.0.0.1:45875, datanodeUuid=86ad93e0-a3f2-493b-bc93-84c5156c1249, infoPort=37217, infoSecurePort=0, ipcPort=44147, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641) 2024-11-11T15:49:19,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20c6e1adac7a0139 with lease ID 0x876180991e449c22: from storage DS-f85155dc-0b2b-42f1-9c32-51a9a3324193 node DatanodeRegistration(127.0.0.1:45875, datanodeUuid=86ad93e0-a3f2-493b-bc93-84c5156c1249, infoPort=37217, infoSecurePort=0, ipcPort=44147, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:19,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25822ceb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/java.io.tmpdir/jetty-localhost-42347-hadoop-hdfs-3_4_1-tests_jar-_-any-17935256813968089278/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T15:49:19,653 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@645c4148{HTTP/1.1, (http/1.1)}{localhost:42347} 2024-11-11T15:49:19,654 INFO [Time-limited test {}] server.Server(415): Started @20909ms 2024-11-11T15:49:19,659 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T15:49:19,717 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T15:49:19,722 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T15:49:19,732 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T15:49:19,732 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T15:49:19,733 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T15:49:19,733 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61b58ba7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.log.dir/,AVAILABLE} 2024-11-11T15:49:19,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c9ea9ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T15:49:19,809 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data3/current/BP-1366544186-172.17.0.3-1731340158641/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:19,810 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data4/current/BP-1366544186-172.17.0.3-1731340158641/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:19,845 WARN [Thread-567 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T15:49:19,849 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e09a79c41c646cd with lease ID 0x876180991e449c23: Processing first storage report for DS-a160f7f2-42b2-404e-a6c0-e72aea23ef9f from datanode DatanodeRegistration(127.0.0.1:36483, datanodeUuid=ce923c52-f9f9-4f1e-b5c3-996b8c246eba, infoPort=32941, infoSecurePort=0, ipcPort=41945, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641) 2024-11-11T15:49:19,850 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e09a79c41c646cd with lease ID 0x876180991e449c23: from storage DS-a160f7f2-42b2-404e-a6c0-e72aea23ef9f node DatanodeRegistration(127.0.0.1:36483, datanodeUuid=ce923c52-f9f9-4f1e-b5c3-996b8c246eba, infoPort=32941, infoSecurePort=0, ipcPort=41945, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:19,850 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e09a79c41c646cd with lease ID 0x876180991e449c23: Processing first storage report for DS-008746bd-3c9a-45d7-9fb3-d7df5c343e4a from datanode DatanodeRegistration(127.0.0.1:36483, datanodeUuid=ce923c52-f9f9-4f1e-b5c3-996b8c246eba, infoPort=32941, infoSecurePort=0, ipcPort=41945, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641) 2024-11-11T15:49:19,850 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e09a79c41c646cd with lease ID 0x876180991e449c23: from storage DS-008746bd-3c9a-45d7-9fb3-d7df5c343e4a node DatanodeRegistration(127.0.0.1:36483, datanodeUuid=ce923c52-f9f9-4f1e-b5c3-996b8c246eba, infoPort=32941, infoSecurePort=0, ipcPort=41945, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:19,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2440b4fe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/java.io.tmpdir/jetty-localhost-42673-hadoop-hdfs-3_4_1-tests_jar-_-any-15058905897610584632/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T15:49:19,910 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20272bbf{HTTP/1.1, (http/1.1)}{localhost:42673} 2024-11-11T15:49:19,910 INFO [Time-limited test {}] server.Server(415): Started @21165ms 2024-11-11T15:49:19,912 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
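The DataNode and Jetty startup messages and the first storage/block reports above come from the embedded HDFS cluster that the test brings up before HBase itself. A minimal sketch, assuming Hadoop's MiniDFSCluster test API (which the HBase testing utility drives internally; the path used below is purely illustrative), of starting a comparable three-datanode cluster directly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Three datanodes, matching numDataNodes=3 in the StartMiniClusterOption logged earlier.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      // Hypothetical directory, only to show the cluster is usable once block reports arrive.
      fs.mkdirs(new Path("/user/jenkins/test-data"));
    } finally {
      cluster.shutdown();
    }
  }
}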
2024-11-11T15:49:20,101 WARN [Thread-613 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data5/current/BP-1366544186-172.17.0.3-1731340158641/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:20,112 WARN [Thread-614 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data6/current/BP-1366544186-172.17.0.3-1731340158641/current, will proceed with Du for space computation calculation, 2024-11-11T15:49:20,172 WARN [Thread-602 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T15:49:20,183 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3dcb8a7fe35f760c with lease ID 0x876180991e449c24: Processing first storage report for DS-7a7a1001-52bf-40c1-8139-16582a39e19c from datanode DatanodeRegistration(127.0.0.1:40391, datanodeUuid=8975f652-06a0-46ad-8a5e-20c024604f57, infoPort=39775, infoSecurePort=0, ipcPort=41891, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641) 2024-11-11T15:49:20,183 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3dcb8a7fe35f760c with lease ID 0x876180991e449c24: from storage DS-7a7a1001-52bf-40c1-8139-16582a39e19c node DatanodeRegistration(127.0.0.1:40391, datanodeUuid=8975f652-06a0-46ad-8a5e-20c024604f57, infoPort=39775, infoSecurePort=0, ipcPort=41891, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T15:49:20,184 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3dcb8a7fe35f760c with lease ID 0x876180991e449c24: Processing first storage report for DS-ce652b98-7978-4a3b-abbb-02252510052b from datanode DatanodeRegistration(127.0.0.1:40391, datanodeUuid=8975f652-06a0-46ad-8a5e-20c024604f57, infoPort=39775, infoSecurePort=0, ipcPort=41891, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641) 2024-11-11T15:49:20,184 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3dcb8a7fe35f760c with lease ID 0x876180991e449c24: from storage DS-ce652b98-7978-4a3b-abbb-02252510052b node DatanodeRegistration(127.0.0.1:40391, datanodeUuid=8975f652-06a0-46ad-8a5e-20c024604f57, infoPort=39775, infoSecurePort=0, ipcPort=41891, storageInfo=lv=-57;cid=testClusterID;nsid=1098929598;c=1731340158641), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T15:49:20,251 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526 2024-11-11T15:49:20,265 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/zookeeper_0, clientPort=51788, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T15:49:20,273 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51788 2024-11-11T15:49:20,274 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:20,276 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:20,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741825_1001 (size=7) 2024-11-11T15:49:20,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741825_1001 (size=7) 2024-11-11T15:49:20,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741825_1001 (size=7) 2024-11-11T15:49:20,785 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7 with version=8 2024-11-11T15:49:20,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36701/user/jenkins/test-data/d35d5aa0-7e73-df36-5f9c-19f6ff91da04/hbase-staging 2024-11-11T15:49:20,788 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9a1fddc00362:0 server-side Connection retries=45 2024-11-11T15:49:20,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:20,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:20,789 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T15:49:20,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:20,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T15:49:20,789 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T15:49:20,790 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T15:49:20,792 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40511 2024-11-11T15:49:20,794 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40511 connecting to ZooKeeper ensemble=127.0.0.1:51788 2024-11-11T15:49:20,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:405110x0, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T15:49:20,838 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40511-0x1019805c26d0000 connected 2024-11-11T15:49:20,915 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:20,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:20,932 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:20,932 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7, hbase.cluster.distributed=false 2024-11-11T15:49:20,961 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T15:49:20,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40511 2024-11-11T15:49:20,983 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40511 2024-11-11T15:49:20,992 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40511 2024-11-11T15:49:21,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40511 2024-11-11T15:49:21,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40511 2024-11-11T15:49:21,031 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9a1fddc00362:0 server-side Connection retries=45 2024-11-11T15:49:21,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:21,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:21,031 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T15:49:21,032 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:21,032 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T15:49:21,032 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T15:49:21,032 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T15:49:21,036 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46573 2024-11-11T15:49:21,038 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46573 connecting to ZooKeeper ensemble=127.0.0.1:51788 2024-11-11T15:49:21,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:21,043 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:21,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:465730x0, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T15:49:21,082 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46573-0x1019805c26d0001 connected 2024-11-11T15:49:21,084 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:21,085 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T15:49:21,100 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T15:49:21,112 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T15:49:21,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T15:49:21,132 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46573 2024-11-11T15:49:21,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46573 2024-11-11T15:49:21,139 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46573 2024-11-11T15:49:21,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46573 2024-11-11T15:49:21,148 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46573 2024-11-11T15:49:21,174 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9a1fddc00362:0 server-side Connection retries=45 2024-11-11T15:49:21,174 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:21,174 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:21,174 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T15:49:21,174 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:21,174 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T15:49:21,174 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T15:49:21,175 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T15:49:21,178 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40339 2024-11-11T15:49:21,179 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40339 connecting to ZooKeeper ensemble=127.0.0.1:51788 2024-11-11T15:49:21,181 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:21,186 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:21,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:403390x0, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T15:49:21,225 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:403390x0, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:21,225 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40339-0x1019805c26d0002 connected 2024-11-11T15:49:21,225 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T15:49:21,234 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T15:49:21,243 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
2024-11-11T15:49:21,264 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T15:49:21,272 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40339 2024-11-11T15:49:21,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40339 2024-11-11T15:49:21,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40339 2024-11-11T15:49:21,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40339 2024-11-11T15:49:21,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40339 2024-11-11T15:49:21,307 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9a1fddc00362:0 server-side Connection retries=45 2024-11-11T15:49:21,307 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:21,307 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:21,307 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T15:49:21,307 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T15:49:21,307 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T15:49:21,307 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T15:49:21,308 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T15:49:21,309 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36919 2024-11-11T15:49:21,311 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36919 connecting to ZooKeeper ensemble=127.0.0.1:51788 2024-11-11T15:49:21,312 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:21,315 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:21,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369190x0, quorum=127.0.0.1:51788, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T15:49:21,329 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:369190x0, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:21,329 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T15:49:21,331 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36919-0x1019805c26d0003 connected 2024-11-11T15:49:21,334 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T15:49:21,335 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T15:49:21,337 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T15:49:21,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36919 2024-11-11T15:49:21,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36919 2024-11-11T15:49:21,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36919 2024-11-11T15:49:21,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36919 2024-11-11T15:49:21,396 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36919 2024-11-11T15:49:21,422 DEBUG [M:0;9a1fddc00362:40511 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9a1fddc00362:40511 2024-11-11T15:49:21,423 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9a1fddc00362,40511,1731340160788 2024-11-11T15:49:21,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:21,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:21,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:21,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:21,432 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9a1fddc00362,40511,1731340160788 2024-11-11T15:49:21,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T15:49:21,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:21,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T15:49:21,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:21,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T15:49:21,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:21,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:21,448 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T15:49:21,449 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9a1fddc00362,40511,1731340160788 from backup master directory 2024-11-11T15:49:21,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:21,455 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T15:49:21,455 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9a1fddc00362,40511,1731340160788 2024-11-11T15:49:21,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:21,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:21,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9a1fddc00362,40511,1731340160788 2024-11-11T15:49:21,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T15:49:21,486 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/hbase.id] with ID: 42c320c3-0a6e-403e-8967-0ccccaf38211 2024-11-11T15:49:21,487 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/.tmp/hbase.id 2024-11-11T15:49:21,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741826_1002 (size=42) 2024-11-11T15:49:21,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741826_1002 (size=42) 2024-11-11T15:49:21,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741826_1002 (size=42) 2024-11-11T15:49:21,571 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/.tmp/hbase.id]:[hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/hbase.id] 2024-11-11T15:49:21,637 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T15:49:21,637 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T15:49:21,643 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 6ms. 
2024-11-11T15:49:21,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:21,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:21,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:21,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:21,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741827_1003 (size=196) 2024-11-11T15:49:21,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741827_1003 (size=196) 2024-11-11T15:49:21,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741827_1003 (size=196) 2024-11-11T15:49:22,200 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T15:49:22,201 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T15:49:22,201 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T15:49:22,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is 
added to blk_1073741828_1004 (size=1189) 2024-11-11T15:49:22,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741828_1004 (size=1189) 2024-11-11T15:49:22,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741828_1004 (size=1189) 2024-11-11T15:49:22,241 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store 2024-11-11T15:49:22,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741829_1005 (size=34) 2024-11-11T15:49:22,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741829_1005 (size=34) 2024-11-11T15:49:22,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741829_1005 (size=34) 2024-11-11T15:49:22,301 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:22,301 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T15:49:22,301 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:22,301 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T15:49:22,301 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T15:49:22,301 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:22,301 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:22,301 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731340162301Disabling compacts and flushes for region at 1731340162301Disabling writes for close at 1731340162301Writing region close event to WAL at 1731340162301Closed at 1731340162301 2024-11-11T15:49:22,309 WARN [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/.initializing 2024-11-11T15:49:22,309 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/WALs/9a1fddc00362,40511,1731340160788 2024-11-11T15:49:22,315 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C40511%2C1731340160788, suffix=, logDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/WALs/9a1fddc00362,40511,1731340160788, archiveDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/oldWALs, maxLogs=10 2024-11-11T15:49:22,316 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9a1fddc00362%2C40511%2C1731340160788.1731340162316 2024-11-11T15:49:22,373 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/WALs/9a1fddc00362,40511,1731340160788/9a1fddc00362%2C40511%2C1731340160788.1731340162316 2024-11-11T15:49:22,404 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32941:32941),(127.0.0.1/127.0.0.1:39775:39775),(127.0.0.1/127.0.0.1:37217:37217)] 2024-11-11T15:49:22,426 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T15:49:22,427 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:22,427 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,427 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,448 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T15:49:22,465 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:22,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:22,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,489 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T15:49:22,489 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:22,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T15:49:22,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,515 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T15:49:22,516 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:22,520 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T15:49:22,520 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T15:49:22,531 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:22,539 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T15:49:22,539 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,541 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,550 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,553 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,553 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,554 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T15:49:22,556 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T15:49:22,561 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T15:49:22,562 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61832414, jitterRate=-0.07862523198127747}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T15:49:22,563 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731340162427Initializing all the Stores at 1731340162442 (+15 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340162442Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340162446 (+4 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340162446Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340162446Cleaning up temporary data from old regions at 1731340162553 (+107 ms)Region opened successfully at 1731340162563 (+10 ms) 2024-11-11T15:49:22,564 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T15:49:22,573 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@542eda4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9a1fddc00362/172.17.0.3:0 2024-11-11T15:49:22,580 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T15:49:22,580 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T15:49:22,581 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T15:49:22,581 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T15:49:22,583 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-11T15:49:22,584 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T15:49:22,584 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T15:49:22,607 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-11T15:49:22,610 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T15:49:22,612 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T15:49:22,613 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T15:49:22,615 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T15:49:22,617 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T15:49:22,617 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T15:49:22,632 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T15:49:22,649 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T15:49:22,660 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T15:49:22,665 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T15:49:22,672 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T15:49:22,680 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T15:49:22,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:22,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:22,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:22,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-11T15:49:22,685 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9a1fddc00362,40511,1731340160788, sessionid=0x1019805c26d0000, setting cluster-up flag (Was=false) 2024-11-11T15:49:22,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:22,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:22,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:22,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:22,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:22,720 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T15:49:22,739 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9a1fddc00362,40511,1731340160788 2024-11-11T15:49:22,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:22,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:22,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:22,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:22,796 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T15:49:22,811 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9a1fddc00362,40511,1731340160788 2024-11-11T15:49:22,821 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T15:49:22,840 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T15:49:22,840 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T15:49:22,841 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T15:49:22,841 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9a1fddc00362,40511,1731340160788 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T15:49:22,883 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9a1fddc00362:0, corePoolSize=5, maxPoolSize=5 2024-11-11T15:49:22,883 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9a1fddc00362:0, corePoolSize=5, maxPoolSize=5 2024-11-11T15:49:22,884 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9a1fddc00362:0, corePoolSize=5, maxPoolSize=5 2024-11-11T15:49:22,884 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9a1fddc00362:0, corePoolSize=5, maxPoolSize=5 2024-11-11T15:49:22,884 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9a1fddc00362:0, corePoolSize=10, maxPoolSize=10 2024-11-11T15:49:22,884 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:22,884 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9a1fddc00362:0, corePoolSize=2, maxPoolSize=2 2024-11-11T15:49:22,884 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:22,896 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1731340192896 2024-11-11T15:49:22,896 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T15:49:22,897 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T15:49:22,897 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T15:49:22,897 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T15:49:22,897 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T15:49:22,897 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T15:49:22,897 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T15:49:22,897 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T15:49:22,899 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:22,899 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T15:49:22,900 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-11T15:49:22,907 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T15:49:22,907 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T15:49:22,907 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T15:49:22,907 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T15:49:22,908 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T15:49:22,908 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.large.0-1731340162908,5,FailOnTimeoutGroup] 2024-11-11T15:49:22,912 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.small.0-1731340162908,5,FailOnTimeoutGroup] 2024-11-11T15:49:22,912 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:22,912 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T15:49:22,912 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:22,912 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-11T15:49:22,920 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(746): ClusterId : 42c320c3-0a6e-403e-8967-0ccccaf38211 2024-11-11T15:49:22,920 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T15:49:22,922 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(746): ClusterId : 42c320c3-0a6e-403e-8967-0ccccaf38211 2024-11-11T15:49:22,922 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T15:49:22,928 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T15:49:22,928 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T15:49:22,931 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T15:49:22,932 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(746): ClusterId : 42c320c3-0a6e-403e-8967-0ccccaf38211 2024-11-11T15:49:22,933 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T15:49:22,935 DEBUG [RS:1;9a1fddc00362:40339 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@587382a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9a1fddc00362/172.17.0.3:0 2024-11-11T15:49:22,940 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T15:49:22,943 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T15:49:22,948 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T15:49:22,948 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T15:49:22,960 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T15:49:22,961 DEBUG [RS:0;9a1fddc00362:46573 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50053b6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9a1fddc00362/172.17.0.3:0 2024-11-11T15:49:22,962 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T15:49:22,968 DEBUG [RS:2;9a1fddc00362:36919 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36d31b75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9a1fddc00362/172.17.0.3:0 2024-11-11T15:49:22,977 DEBUG [RS:1;9a1fddc00362:40339 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9a1fddc00362:40339 2024-11-11T15:49:22,978 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T15:49:22,978 
INFO [RS:1;9a1fddc00362:40339 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T15:49:22,978 DEBUG [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T15:49:22,989 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(2659): reportForDuty to master=9a1fddc00362,40511,1731340160788 with port=40339, startcode=1731340161173 2024-11-11T15:49:22,989 DEBUG [RS:1;9a1fddc00362:40339 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T15:49:22,989 DEBUG [RS:0;9a1fddc00362:46573 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9a1fddc00362:46573 2024-11-11T15:49:22,990 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T15:49:22,990 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T15:49:22,990 DEBUG [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T15:49:22,997 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(2659): reportForDuty to master=9a1fddc00362,40511,1731340160788 with port=46573, startcode=1731340161030 2024-11-11T15:49:22,997 DEBUG [RS:0;9a1fddc00362:46573 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T15:49:23,011 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53329, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T15:49:23,015 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58491, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T15:49:23,016 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9a1fddc00362,40339,1731340161173 2024-11-11T15:49:23,016 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40511 {}] master.ServerManager(517): Registering regionserver=9a1fddc00362,40339,1731340161173 2024-11-11T15:49:23,026 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9a1fddc00362,46573,1731340161030 2024-11-11T15:49:23,026 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40511 {}] master.ServerManager(517): Registering regionserver=9a1fddc00362,46573,1731340161030 2024-11-11T15:49:23,032 DEBUG [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7 2024-11-11T15:49:23,032 DEBUG [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35711 2024-11-11T15:49:23,032 DEBUG [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T15:49:23,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741831_1007 (size=1321) 2024-11-11T15:49:23,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 
is added to blk_1073741831_1007 (size=1321) 2024-11-11T15:49:23,034 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;9a1fddc00362:36919 2024-11-11T15:49:23,035 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T15:49:23,035 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T15:49:23,035 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T15:49:23,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741831_1007 (size=1321) 2024-11-11T15:49:23,041 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(2659): reportForDuty to master=9a1fddc00362,40511,1731340160788 with port=36919, startcode=1731340161306 2024-11-11T15:49:23,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T15:49:23,042 DEBUG [RS:2;9a1fddc00362:36919 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T15:49:23,044 DEBUG [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7 2024-11-11T15:49:23,044 DEBUG [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35711 2024-11-11T15:49:23,044 DEBUG [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T15:49:23,045 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38309, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T15:49:23,048 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9a1fddc00362,36919,1731340161306 2024-11-11T15:49:23,048 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40511 {}] master.ServerManager(517): Registering regionserver=9a1fddc00362,36919,1731340161306 2024-11-11T15:49:23,049 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T15:49:23,050 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7 2024-11-11T15:49:23,057 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9a1fddc00362,46573,1731340161030] 2024-11-11T15:49:23,057 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9a1fddc00362,40339,1731340161173] 2024-11-11T15:49:23,059 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7 2024-11-11T15:49:23,059 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35711 2024-11-11T15:49:23,059 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T15:49:23,060 DEBUG [RS:1;9a1fddc00362:40339 {}] zookeeper.ZKUtil(111): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9a1fddc00362,40339,1731340161173 2024-11-11T15:49:23,060 WARN [RS:1;9a1fddc00362:40339 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T15:49:23,060 INFO [RS:1;9a1fddc00362:40339 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T15:49:23,060 DEBUG [RS:0;9a1fddc00362:46573 {}] zookeeper.ZKUtil(111): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9a1fddc00362,46573,1731340161030 2024-11-11T15:49:23,060 DEBUG [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,40339,1731340161173 2024-11-11T15:49:23,060 WARN [RS:0;9a1fddc00362:46573 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
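The hbase:meta table descriptor logged above (families info, ns, rep_barrier and table; ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory, 8 KB data blocks) maps directly onto the HBase client descriptor builders. A minimal sketch of one such family follows, assuming the standard org.apache.hadoop.hbase.client builder API; the class name and table name are illustrative, only the attribute values are taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family from the descriptor in the log:
        // VERSIONS=3, ROWCOL bloom, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo")) // hypothetical table name, not from the log
            .setColumnFamily(info)
            .build();

        System.out.println(td);
      }
    }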
2024-11-11T15:49:23,060 INFO [RS:0;9a1fddc00362:46573 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T15:49:23,060 DEBUG [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,46573,1731340161030 2024-11-11T15:49:23,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T15:49:23,071 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9a1fddc00362,36919,1731340161306] 2024-11-11T15:49:23,071 DEBUG [RS:2;9a1fddc00362:36919 {}] zookeeper.ZKUtil(111): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9a1fddc00362,36919,1731340161306 2024-11-11T15:49:23,071 WARN [RS:2;9a1fddc00362:36919 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T15:49:23,071 INFO [RS:2;9a1fddc00362:36919 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T15:49:23,072 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,36919,1731340161306 2024-11-11T15:49:23,096 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T15:49:23,108 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T15:49:23,109 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T15:49:23,129 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T15:49:23,134 INFO [RS:1;9a1fddc00362:40339 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T15:49:23,134 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,140 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T15:49:23,152 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T15:49:23,152 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
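The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit. The sketch below shows how those two figures are commonly derived from the heap size; the property names are assumptions to verify against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys: the global limit is a fraction of the heap, and the
        // low-water mark is a fraction of that limit.
        double globalFraction = conf.getDouble("hbase.regionserver.global.memstore.size", 0.4);
        double lowerFraction  = conf.getDouble("hbase.regionserver.global.memstore.size.lower.limit", 0.95);

        long heapBytes = Runtime.getRuntime().maxMemory();
        long globalLimit = (long) (heapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerFraction);
        // With the 880 M limit from the log, 880 * 0.95 is roughly 836 M, matching the logged low mark.
        System.out.printf("limit=%d lowMark=%d%n", globalLimit, lowMark);
      }
    }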
2024-11-11T15:49:23,153 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,153 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,153 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,153 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,153 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,153 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9a1fddc00362:0, corePoolSize=2, maxPoolSize=2 2024-11-11T15:49:23,153 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,154 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,154 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,154 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,154 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,154 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,154 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:23,154 DEBUG [RS:1;9a1fddc00362:40339 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:23,162 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T15:49:23,163 INFO [RS:0;9a1fddc00362:46573 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T15:49:23,163 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
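Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" line above describes a fixed-size handler pool dedicated to one event type on the region server. The sketch below is only an analogy in plain java.util.concurrent terms, not the HBase ExecutorService implementation itself:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class HandlerPoolSketch {
      public static void main(String[] args) {
        // One fixed-size pool per event type; core == max, as in the log lines above.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1,                       // corePoolSize=1, maxPoolSize=1, as for RS_OPEN_REGION
            60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("would run an open-region handler here"));
        openRegionPool.shutdown();
      }
    }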
2024-11-11T15:49:23,164 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T15:49:23,169 INFO [RS:2;9a1fddc00362:36919 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T15:49:23,169 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,175 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T15:49:23,178 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T15:49:23,178 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9a1fddc00362:0, corePoolSize=2, maxPoolSize=2 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,179 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,180 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:23,180 DEBUG [RS:2;9a1fddc00362:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:23,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741832_1008 (size=32) 2024-11-11T15:49:23,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741832_1008 (size=32) 2024-11-11T15:49:23,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741832_1008 (size=32) 2024-11-11T15:49:23,188 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,188 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,188 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,188 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,188 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,188 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,40339,1731340161173-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T15:49:23,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:23,208 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,208 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,208 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,208 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,208 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,208 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,36919,1731340161306-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
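The ChoreService lines above register periodic background tasks (CompactionChecker and MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every 60000 ms, MobFileCleanerChore every 86400 s, and so on). A rough analogue using a plain ScheduledExecutorService, shown only to illustrate the period/unit pairs reported in the log:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
        // Rough analogue of "ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS":
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("check whether any store needs compaction"),
            1000, 1000, TimeUnit.MILLISECONDS);
        // ... shut the pool down when the server stops
      }
    }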
2024-11-11T15:49:23,228 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T15:49:23,230 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T15:49:23,230 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,40339,1731340161173-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,231 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,231 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.Replication(171): 9a1fddc00362,40339,1731340161173 started 2024-11-11T15:49:23,236 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T15:49:23,236 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,237 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,237 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,237 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,237 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,237 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,237 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9a1fddc00362:0, corePoolSize=2, maxPoolSize=2 2024-11-11T15:49:23,237 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,237 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,238 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,238 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,238 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,238 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9a1fddc00362:0, corePoolSize=1, maxPoolSize=1 2024-11-11T15:49:23,238 DEBUG [RS:0;9a1fddc00362:46573 {}] 
executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:23,238 DEBUG [RS:0;9a1fddc00362:46573 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0, corePoolSize=3, maxPoolSize=3 2024-11-11T15:49:23,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T15:49:23,245 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T15:49:23,246 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,36919,1731340161306-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,246 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,246 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.Replication(171): 9a1fddc00362,36919,1731340161306 started 2024-11-11T15:49:23,247 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T15:49:23,247 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:23,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:23,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T15:49:23,260 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,260 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,260 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,260 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
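The CompactionConfiguration line above spells out the selection parameters for the info family: minCompactSize 128 MB, between 3 and 10 files per compaction, ratio 1.2 (5.0 off-peak). A sketch of setting the corresponding limits follows; the property names are assumptions and should be checked against the documentation for this HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names for the values reported in the log.
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // selection ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }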
2024-11-11T15:49:23,260 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,260 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,46573,1731340161030-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T15:49:23,265 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T15:49:23,265 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:23,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:23,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T15:49:23,281 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T15:49:23,281 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:23,282 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T15:49:23,282 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(1482): Serving as 9a1fddc00362,40339,1731340161173, RpcServer on 9a1fddc00362/172.17.0.3:40339, sessionid=0x1019805c26d0002 2024-11-11T15:49:23,283 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T15:49:23,283 DEBUG [RS:1;9a1fddc00362:40339 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9a1fddc00362,40339,1731340161173 2024-11-11T15:49:23,283 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,40339,1731340161173' 2024-11-11T15:49:23,283 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T15:49:23,283 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:23,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T15:49:23,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T15:49:23,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:23,296 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T15:49:23,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:23,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T15:49:23,297 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T15:49:23,297 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T15:49:23,297 DEBUG [RS:1;9a1fddc00362:40339 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9a1fddc00362,40339,1731340161173 2024-11-11T15:49:23,297 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 
'9a1fddc00362,40339,1731340161173' 2024-11-11T15:49:23,297 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T15:49:23,298 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T15:49:23,298 DEBUG [RS:1;9a1fddc00362:40339 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T15:49:23,298 INFO [RS:1;9a1fddc00362:40339 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T15:49:23,298 INFO [RS:1;9a1fddc00362:40339 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T15:49:23,302 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,303 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1482): Serving as 9a1fddc00362,36919,1731340161306, RpcServer on 9a1fddc00362/172.17.0.3:36919, sessionid=0x1019805c26d0003 2024-11-11T15:49:23,303 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T15:49:23,303 DEBUG [RS:2;9a1fddc00362:36919 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9a1fddc00362,36919,1731340161306 2024-11-11T15:49:23,303 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,36919,1731340161306' 2024-11-11T15:49:23,303 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T15:49:23,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740 2024-11-11T15:49:23,305 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T15:49:23,306 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,46573,1731340161030-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,306 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:23,306 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.Replication(171): 9a1fddc00362,46573,1731340161030 started 2024-11-11T15:49:23,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740 2024-11-11T15:49:23,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T15:49:23,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T15:49:23,310 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
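The ZKProcedureMemberRpcs lines above show each region server first checking '/hbase/flush-table-proc/abort' and '/hbase/online-snapshot/abort' for aborted procedures and then watching the matching '.../acquired' znodes for new ones, while the earlier ZKWatcher lines show NodeChildrenChanged events arriving for /hbase/rs. A self-contained sketch of the underlying ZooKeeper pattern (list a znode's children and keep a watch), using the quorum address from the log; the handling logic is illustrative only:

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ProcedureZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum address taken from the log; the watcher body is illustrative.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51788", 30000, (WatchedEvent e) -> {
          if (e.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
            System.out.println("children changed under " + e.getPath());
          }
        });
        // Like "Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'".
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", true);
        System.out.println("pending procedures: " + acquired);
        zk.close();
      }
    }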
2024-11-11T15:49:23,312 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T15:49:23,312 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T15:49:23,317 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T15:49:23,318 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68909934, jitterRate=0.026838034391403198}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T15:49:23,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731340163190Initializing all the Stores at 1731340163200 (+10 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340163200Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340163239 (+39 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340163239Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340163239Cleaning up temporary data from old regions at 1731340163309 (+70 ms)Region opened successfully at 1731340163319 (+10 ms) 2024-11-11T15:49:23,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T15:49:23,319 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T15:49:23,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T15:49:23,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T15:49:23,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T15:49:23,322 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T15:49:23,322 DEBUG 
[RS:2;9a1fddc00362:36919 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T15:49:23,323 DEBUG [RS:2;9a1fddc00362:36919 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9a1fddc00362,36919,1731340161306 2024-11-11T15:49:23,323 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,36919,1731340161306' 2024-11-11T15:49:23,323 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T15:49:23,328 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T15:49:23,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731340163319Disabling compacts and flushes for region at 1731340163319Disabling writes for close at 1731340163319Writing region close event to WAL at 1731340163328 (+9 ms)Closed at 1731340163328 2024-11-11T15:49:23,330 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T15:49:23,334 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T15:49:23,334 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T15:49:23,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T15:49:23,338 DEBUG [RS:2;9a1fddc00362:36919 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T15:49:23,338 INFO [RS:2;9a1fddc00362:36919 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T15:49:23,338 INFO [RS:2;9a1fddc00362:36919 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T15:49:23,344 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
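The quota manager lines above ("Quota support disabled", "not starting space quota manager") correspond to the quota switch being off, which is the default. A tiny sketch of reading that switch; hbase.quota.enabled is the assumed property name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaFlagSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed switch behind the "Quota support disabled" lines; off by default.
        boolean quotasEnabled = conf.getBoolean("hbase.quota.enabled", false);
        System.out.println("quotas enabled: " + quotasEnabled);
      }
    }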
2024-11-11T15:49:23,344 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(1482): Serving as 9a1fddc00362,46573,1731340161030, RpcServer on 9a1fddc00362/172.17.0.3:46573, sessionid=0x1019805c26d0001 2024-11-11T15:49:23,344 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T15:49:23,345 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T15:49:23,345 DEBUG [RS:0;9a1fddc00362:46573 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9a1fddc00362,46573,1731340161030 2024-11-11T15:49:23,345 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,46573,1731340161030' 2024-11-11T15:49:23,345 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T15:49:23,347 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T15:49:23,361 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T15:49:23,372 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T15:49:23,372 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T15:49:23,372 DEBUG [RS:0;9a1fddc00362:46573 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9a1fddc00362,46573,1731340161030 2024-11-11T15:49:23,372 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9a1fddc00362,46573,1731340161030' 2024-11-11T15:49:23,372 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T15:49:23,380 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T15:49:23,395 DEBUG [RS:0;9a1fddc00362:46573 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T15:49:23,395 INFO [RS:0;9a1fddc00362:46573 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T15:49:23,395 INFO [RS:0;9a1fddc00362:46573 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
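At this point all three region servers (RS:0, RS:1, RS:2) report "Serving as ..." against master 9a1fddc00362,40511, so the whole sequence is the startup of a three-regionserver mini cluster inside a test. A sketch of how such a cluster is typically brought up, assuming the 2.x test utility class name; on 3.x branches the class is named HBaseTestingUtil (assumption, check the version in use):

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class ThreeRegionServerClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(3);   // one master, three region servers, as in this log
        try {
          // ... run test code against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }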
2024-11-11T15:49:23,419 INFO [RS:1;9a1fddc00362:40339 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C40339%2C1731340161173, suffix=, logDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,40339,1731340161173, archiveDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/oldWALs, maxLogs=32 2024-11-11T15:49:23,422 INFO [RS:1;9a1fddc00362:40339 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9a1fddc00362%2C40339%2C1731340161173.1731340163421 2024-11-11T15:49:23,448 INFO [RS:2;9a1fddc00362:36919 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C36919%2C1731340161306, suffix=, logDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,36919,1731340161306, archiveDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/oldWALs, maxLogs=32 2024-11-11T15:49:23,450 INFO [RS:2;9a1fddc00362:36919 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9a1fddc00362%2C36919%2C1731340161306.1731340163449 2024-11-11T15:49:23,485 INFO [RS:1;9a1fddc00362:40339 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,40339,1731340161173/9a1fddc00362%2C40339%2C1731340161173.1731340163421 2024-11-11T15:49:23,498 WARN [9a1fddc00362:40511 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T15:49:23,500 DEBUG [RS:1;9a1fddc00362:40339 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32941:32941),(127.0.0.1/127.0.0.1:37217:37217),(127.0.0.1/127.0.0.1:39775:39775)] 2024-11-11T15:49:23,502 INFO [RS:0;9a1fddc00362:46573 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C46573%2C1731340161030, suffix=, logDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,46573,1731340161030, archiveDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/oldWALs, maxLogs=32 2024-11-11T15:49:23,503 INFO [RS:0;9a1fddc00362:46573 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9a1fddc00362%2C46573%2C1731340161030.1731340163503 2024-11-11T15:49:23,536 INFO [RS:2;9a1fddc00362:36919 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,36919,1731340161306/9a1fddc00362%2C36919%2C1731340161306.1731340163449 2024-11-11T15:49:23,568 DEBUG [RS:2;9a1fddc00362:36919 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37217:37217),(127.0.0.1/127.0.0.1:32941:32941),(127.0.0.1/127.0.0.1:39775:39775)] 2024-11-11T15:49:23,632 INFO [RS:0;9a1fddc00362:46573 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,46573,1731340161030/9a1fddc00362%2C46573%2C1731340161030.1731340163503 2024-11-11T15:49:23,660 DEBUG [RS:0;9a1fddc00362:46573 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32941:32941),(127.0.0.1/127.0.0.1:37217:37217),(127.0.0.1/127.0.0.1:39775:39775)] 2024-11-11T15:49:23,748 DEBUG [9a1fddc00362:40511 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-11T15:49:23,749 DEBUG [9a1fddc00362:40511 {}] 
balancer.BalancerClusterState(204): Hosts are {9a1fddc00362=0} racks are {/default-rack=0} 2024-11-11T15:49:23,753 DEBUG [9a1fddc00362:40511 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T15:49:23,753 DEBUG [9a1fddc00362:40511 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T15:49:23,753 DEBUG [9a1fddc00362:40511 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T15:49:23,753 DEBUG [9a1fddc00362:40511 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T15:49:23,753 DEBUG [9a1fddc00362:40511 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T15:49:23,753 DEBUG [9a1fddc00362:40511 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T15:49:23,753 INFO [9a1fddc00362:40511 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T15:49:23,753 INFO [9a1fddc00362:40511 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T15:49:23,753 INFO [9a1fddc00362:40511 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T15:49:23,753 DEBUG [9a1fddc00362:40511 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T15:49:23,754 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9a1fddc00362,36919,1731340161306 2024-11-11T15:49:23,758 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9a1fddc00362,36919,1731340161306, state=OPENING 2024-11-11T15:49:23,774 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T15:49:23,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:23,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:23,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:23,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:23,792 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:23,792 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T15:49:23,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9a1fddc00362,36919,1731340161306}] 2024-11-11T15:49:23,796 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): 
Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:23,797 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:23,797 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:23,950 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T15:49:23,953 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39011, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T15:49:23,969 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T15:49:23,970 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T15:49:23,977 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9a1fddc00362%2C36919%2C1731340161306.meta, suffix=.meta, logDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,36919,1731340161306, archiveDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/oldWALs, maxLogs=32 2024-11-11T15:49:23,979 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9a1fddc00362%2C36919%2C1731340161306.meta.1731340163978.meta 2024-11-11T15:49:24,076 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/WALs/9a1fddc00362,36919,1731340161306/9a1fddc00362%2C36919%2C1731340161306.meta.1731340163978.meta 2024-11-11T15:49:24,092 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39775:39775),(127.0.0.1/127.0.0.1:32941:32941),(127.0.0.1/127.0.0.1:37217:37217)] 2024-11-11T15:49:24,118 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T15:49:24,119 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T15:49:24,119 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T15:49:24,119 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
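The WAL configuration lines above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, both for the regular WALs and the .meta WAL) fit the usual relation rollsize = blocksize * roll multiplier, with a multiplier of 0.5 here. A sketch of that computation with assumed property names, to be verified against this HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys behind "WAL configuration: blocksize=256 MB, rollsize=128 MB, ... maxLogs=32".
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (blockSize * multiplier); // 256 MB * 0.5 = 128 MB, as in the log
        System.out.printf("rollsize=%d maxLogs=%d%n", rollSize, maxLogs);
      }
    }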
2024-11-11T15:49:24,120 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T15:49:24,120 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:24,120 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T15:49:24,120 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T15:49:24,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T15:49:24,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T15:49:24,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:24,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:24,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T15:49:24,157 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T15:49:24,157 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:24,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:24,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T15:49:24,161 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T15:49:24,161 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:24,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T15:49:24,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T15:49:24,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T15:49:24,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:24,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
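Once the assignment above places hbase:meta on 9a1fddc00362,36919,1731340161306 and the region finishes opening there, a client can resolve that location through the normal region-locator API. A minimal sketch using the ZooKeeper quorum from the log; everything else in it is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // quorum host from the log
        conf.set("hbase.zookeeper.property.clientPort", "51788"); // test ZK port from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // After assignment this reports the server hosting hbase:meta,
            // e.g. 9a1fddc00362,36919,1731340161306 in this run.
            System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
          }
        }
      }
    }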
2024-11-11T15:49:24,192 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T15:49:24,197 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740 2024-11-11T15:49:24,204 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740 2024-11-11T15:49:24,226 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T15:49:24,226 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T15:49:24,230 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T15:49:24,249 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T15:49:24,257 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73154437, jitterRate=0.09008605778217316}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T15:49:24,258 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T15:49:24,260 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731340164126Writing region info on filesystem at 1731340164126Initializing all the Stores at 1731340164136 (+10 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340164136Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340164140 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340164140Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731340164140Cleaning up temporary data from old regions at 1731340164226 (+86 ms)Running coprocessor post-open hooks at 1731340164258 (+32 ms)Region opened successfully at 1731340164259 (+1 ms) 2024-11-11T15:49:24,272 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731340163949 2024-11-11T15:49:24,297 DEBUG [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T15:49:24,297 INFO [RS_OPEN_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T15:49:24,302 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9a1fddc00362,36919,1731340161306 2024-11-11T15:49:24,308 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9a1fddc00362,36919,1731340161306, state=OPEN 2024-11-11T15:49:24,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T15:49:24,312 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:24,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T15:49:24,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T15:49:24,316 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:24,316 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:24,317 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9a1fddc00362,36919,1731340161306 2024-11-11T15:49:24,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T15:49:24,320 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T15:49:24,334 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T15:49:24,334 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9a1fddc00362,36919,1731340161306 in 525 msec 2024-11-11T15:49:24,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T15:49:24,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 998 msec 2024-11-11T15:49:24,368 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T15:49:24,368 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T15:49:24,375 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T15:49:24,375 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9a1fddc00362,36919,1731340161306, seqNum=-1] 2024-11-11T15:49:24,375 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T15:49:24,378 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35909, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T15:49:24,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.5680 sec 2024-11-11T15:49:24,414 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731340164414, completionTime=-1 2024-11-11T15:49:24,414 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-11T15:49:24,414 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-11T15:49:24,418 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-11T15:49:24,418 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731340224418 2024-11-11T15:49:24,419 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731340284419 2024-11-11T15:49:24,419 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 4 msec 2024-11-11T15:49:24,419 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-11T15:49:24,420 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,40511,1731340160788-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:24,420 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,40511,1731340160788-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:24,420 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,40511,1731340160788-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:24,420 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9a1fddc00362:40511, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:24,420 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:24,422 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:24,427 DEBUG [master/9a1fddc00362:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T15:49:24,430 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.974sec 2024-11-11T15:49:24,430 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T15:49:24,430 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T15:49:24,430 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T15:49:24,431 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-11T15:49:24,431 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T15:49:24,431 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,40511,1731340160788-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T15:49:24,431 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,40511,1731340160788-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T15:49:24,435 DEBUG [master/9a1fddc00362:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T15:49:24,435 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T15:49:24,436 INFO [master/9a1fddc00362:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9a1fddc00362,40511,1731340160788-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T15:49:24,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ac6046a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T15:49:24,457 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9a1fddc00362,40511,-1 for getting cluster id 2024-11-11T15:49:24,457 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T15:49:24,473 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '42c320c3-0a6e-403e-8967-0ccccaf38211' 2024-11-11T15:49:24,474 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T15:49:24,474 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "42c320c3-0a6e-403e-8967-0ccccaf38211" 2024-11-11T15:49:24,475 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@750432b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T15:49:24,475 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9a1fddc00362,40511,-1] 2024-11-11T15:49:24,475 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T15:49:24,482 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:24,489 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39712, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T15:49:24,502 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1282192, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T15:49:24,503 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T15:49:24,509 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9a1fddc00362,36919,1731340161306, seqNum=-1] 2024-11-11T15:49:24,509 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T15:49:24,519 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37044, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T15:49:24,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9a1fddc00362,40511,1731340160788 2024-11-11T15:49:24,529 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T15:49:24,540 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 9a1fddc00362,40511,1731340160788 2024-11-11T15:49:24,540 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@fa485ac 2024-11-11T15:49:24,541 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T15:49:24,553 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39722, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T15:49:24,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T15:49:24,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-11T15:49:24,576 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T15:49:24,577 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:24,580 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-11T15:49:24,580 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 
2024-11-11T15:49:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:24,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741837_1013 (size=392) 2024-11-11T15:49:24,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741837_1013 (size=392) 2024-11-11T15:49:24,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741837_1013 (size=392) 2024-11-11T15:49:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:24,702 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a4919f76ae377776e837e79266e334bc, NAME => 'TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7 2024-11-11T15:49:24,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741838_1014 (size=51) 2024-11-11T15:49:24,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741838_1014 (size=51) 2024-11-11T15:49:24,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741838_1014 (size=51) 2024-11-11T15:49:24,845 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:24,845 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing a4919f76ae377776e837e79266e334bc, disabling compactions & flushes 2024-11-11T15:49:24,845 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 2024-11-11T15:49:24,845 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 2024-11-11T15:49:24,845 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. after waiting 0 ms 2024-11-11T15:49:24,845 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 
2024-11-11T15:49:24,845 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 2024-11-11T15:49:24,845 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for a4919f76ae377776e837e79266e334bc: Waiting for close lock at 1731340164845Disabling compacts and flushes for region at 1731340164845Disabling writes for close at 1731340164845Writing region close event to WAL at 1731340164845Closed at 1731340164845 2024-11-11T15:49:24,851 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T15:49:24,851 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731340164851"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731340164851"}]},"ts":"1731340164851"} 2024-11-11T15:49:24,857 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T15:49:24,860 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T15:49:24,860 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731340164860"}]},"ts":"1731340164860"} 2024-11-11T15:49:24,869 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-11T15:49:24,869 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {9a1fddc00362=0} racks are {/default-rack=0} 2024-11-11T15:49:24,874 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T15:49:24,874 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T15:49:24,874 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T15:49:24,874 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T15:49:24,874 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T15:49:24,874 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T15:49:24,874 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T15:49:24,874 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T15:49:24,874 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T15:49:24,874 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T15:49:24,875 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a4919f76ae377776e837e79266e334bc, ASSIGN}] 2024-11-11T15:49:24,889 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a4919f76ae377776e837e79266e334bc, ASSIGN 2024-11-11T15:49:24,897 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a4919f76ae377776e837e79266e334bc, ASSIGN; state=OFFLINE, location=9a1fddc00362,36919,1731340161306; forceNewPlan=false, retain=false 2024-11-11T15:49:24,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:25,048 INFO [9a1fddc00362:40511 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-11T15:49:25,049 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a4919f76ae377776e837e79266e334bc, regionState=OPENING, regionLocation=9a1fddc00362,36919,1731340161306 2024-11-11T15:49:25,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a4919f76ae377776e837e79266e334bc, ASSIGN because future has completed 2024-11-11T15:49:25,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a4919f76ae377776e837e79266e334bc, server=9a1fddc00362,36919,1731340161306}] 2024-11-11T15:49:25,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:25,244 INFO [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 
2024-11-11T15:49:25,244 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a4919f76ae377776e837e79266e334bc, NAME => 'TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc.', STARTKEY => '', ENDKEY => ''} 2024-11-11T15:49:25,245 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,245 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T15:49:25,245 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,245 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,266 INFO [StoreOpener-a4919f76ae377776e837e79266e334bc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,285 INFO [StoreOpener-a4919f76ae377776e837e79266e334bc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4919f76ae377776e837e79266e334bc columnFamilyName cf 2024-11-11T15:49:25,286 DEBUG [StoreOpener-a4919f76ae377776e837e79266e334bc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T15:49:25,291 INFO [StoreOpener-a4919f76ae377776e837e79266e334bc-1 {}] regionserver.HStore(327): Store=a4919f76ae377776e837e79266e334bc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T15:49:25,292 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,295 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/default/TestHBaseWalOnEC/a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,300 DEBUG 
[RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/default/TestHBaseWalOnEC/a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,303 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,303 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,319 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,336 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/default/TestHBaseWalOnEC/a4919f76ae377776e837e79266e334bc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T15:49:25,339 INFO [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a4919f76ae377776e837e79266e334bc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59191345, jitterRate=-0.1179802268743515}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T15:49:25,340 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:25,341 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a4919f76ae377776e837e79266e334bc: Running coprocessor pre-open hook at 1731340165246Writing region info on filesystem at 1731340165246Initializing all the Stores at 1731340165261 (+15 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731340165261Cleaning up temporary data from old regions at 1731340165303 (+42 ms)Running coprocessor post-open hooks at 1731340165340 (+37 ms)Region opened successfully at 1731340165341 (+1 ms) 2024-11-11T15:49:25,356 INFO [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc., pid=6, masterSystemTime=1731340165226 2024-11-11T15:49:25,372 DEBUG [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 2024-11-11T15:49:25,372 INFO [RS_OPEN_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 
2024-11-11T15:49:25,384 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a4919f76ae377776e837e79266e334bc, regionState=OPEN, openSeqNum=2, regionLocation=9a1fddc00362,36919,1731340161306 2024-11-11T15:49:25,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a4919f76ae377776e837e79266e334bc, server=9a1fddc00362,36919,1731340161306 because future has completed 2024-11-11T15:49:25,431 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T15:49:25,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a4919f76ae377776e837e79266e334bc, server=9a1fddc00362,36919,1731340161306 in 342 msec 2024-11-11T15:49:25,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T15:49:25,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a4919f76ae377776e837e79266e334bc, ASSIGN in 558 msec 2024-11-11T15:49:25,463 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T15:49:25,463 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731340165463"}]},"ts":"1731340165463"} 2024-11-11T15:49:25,471 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-11T15:49:25,474 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T15:49:25,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 918 msec 2024-11-11T15:49:25,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T15:49:25,730 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-11T15:49:25,730 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-11T15:49:25,731 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T15:49:25,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-11T15:49:25,745 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T15:49:25,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-11T15:49:25,750 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc., hostname=9a1fddc00362,36919,1731340161306, seqNum=2] 2024-11-11T15:49:25,769 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-11T15:49:25,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-11T15:49:25,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T15:49:25,785 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-11T15:49:25,800 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T15:49:25,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T15:49:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T15:49:25,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36919 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-11T15:49:25,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 
2024-11-11T15:49:25,996 INFO [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing a4919f76ae377776e837e79266e334bc 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-11T15:49:26,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/default/TestHBaseWalOnEC/a4919f76ae377776e837e79266e334bc/.tmp/cf/f8327d7205eb40da899efe7af144a051 is 36, key is row/cf:cq/1731340165754/Put/seqid=0 2024-11-11T15:49:26,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741839_1015 (size=4787) 2024-11-11T15:49:26,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741839_1015 (size=4787) 2024-11-11T15:49:26,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741839_1015 (size=4787) 2024-11-11T15:49:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T15:49:26,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T15:49:26,482 INFO [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/default/TestHBaseWalOnEC/a4919f76ae377776e837e79266e334bc/.tmp/cf/f8327d7205eb40da899efe7af144a051 2024-11-11T15:49:26,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/default/TestHBaseWalOnEC/a4919f76ae377776e837e79266e334bc/.tmp/cf/f8327d7205eb40da899efe7af144a051 as hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/default/TestHBaseWalOnEC/a4919f76ae377776e837e79266e334bc/cf/f8327d7205eb40da899efe7af144a051 2024-11-11T15:49:26,558 INFO [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/default/TestHBaseWalOnEC/a4919f76ae377776e837e79266e334bc/cf/f8327d7205eb40da899efe7af144a051, entries=1, sequenceid=5, filesize=4.7 K 2024-11-11T15:49:26,568 INFO [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for a4919f76ae377776e837e79266e334bc in 571ms, sequenceid=5, compaction requested=false 2024-11-11T15:49:26,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for a4919f76ae377776e837e79266e334bc: 2024-11-11T15:49:26,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation 
on TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 2024-11-11T15:49:26,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9a1fddc00362:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-11T15:49:26,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-11T15:49:26,602 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-11T15:49:26,602 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 785 msec 2024-11-11T15:49:26,640 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 839 msec 2024-11-11T15:49:26,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T15:49:26,919 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-11T15:49:26,924 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T15:49:26,924 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T15:49:26,924 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) 
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:26,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:26,925 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:26,925 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T15:49:26,925 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1814889570, stopped=false 2024-11-11T15:49:26,925 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-11T15:49:26,925 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9a1fddc00362,40511,1731340160788 2024-11-11T15:49:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T15:49:26,927 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T15:49:26,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:26,927 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T15:49:26,928 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:26,928 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:26,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:26,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:26,928 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9a1fddc00362,46573,1731340161030' ***** 2024-11-11T15:49:26,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:26,928 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T15:49:26,928 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9a1fddc00362,40339,1731340161173' ***** 2024-11-11T15:49:26,928 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T15:49:26,929 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9a1fddc00362,36919,1731340161306' ***** 2024-11-11T15:49:26,929 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T15:49:26,929 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T15:49:26,929 INFO [RS:0;9a1fddc00362:46573 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T15:49:26,929 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T15:49:26,929 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T15:49:26,929 INFO [RS:0;9a1fddc00362:46573 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T15:49:26,929 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T15:49:26,929 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(959): stopping server 9a1fddc00362,46573,1731340161030 2024-11-11T15:49:26,929 INFO [RS:0;9a1fddc00362:46573 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T15:49:26,929 INFO [RS:1;9a1fddc00362:40339 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T15:49:26,929 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T15:49:26,930 INFO [RS:1;9a1fddc00362:40339 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T15:49:26,930 INFO [RS:0;9a1fddc00362:46573 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9a1fddc00362:46573. 
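The call stack logged above ends in TestHBaseWalOnEC.tearDown, which reaches the cluster through HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that kind of JUnit 4 teardown (not the actual test source; the class name, field name, and @AfterClass placement are assumptions) is:

```java
// Sketch only: the shape of a teardown that produces the call chain in the stack trace
// above (RunAfters -> tearDown -> shutdownMiniCluster -> HMaster.shutdown).
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class WalOnEcTeardownSketch {
  // In the real test the utility would be created and started in a @BeforeClass hook.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDown() throws Exception {
    // Stops the single-process cluster: the master requests cluster shutdown, deletes
    // /hbase/running, and every region server then stops itself, as logged above.
    UTIL.shutdownMiniCluster();
  }
}
```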
2024-11-11T15:49:26,930 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(959): stopping server 9a1fddc00362,40339,1731340161173 2024-11-11T15:49:26,930 INFO [RS:1;9a1fddc00362:40339 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T15:49:26,930 DEBUG [RS:0;9a1fddc00362:46573 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:26,930 DEBUG [RS:0;9a1fddc00362:46573 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:26,931 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(976): stopping server 9a1fddc00362,46573,1731340161030; all regions closed. 2024-11-11T15:49:26,931 INFO [RS:1;9a1fddc00362:40339 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;9a1fddc00362:40339. 
2024-11-11T15:49:26,931 DEBUG [RS:1;9a1fddc00362:40339 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:26,931 DEBUG [RS:1;9a1fddc00362:40339 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:26,931 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(976): stopping server 9a1fddc00362,40339,1731340161173; all regions closed. 2024-11-11T15:49:26,931 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T15:49:26,931 INFO [RS:2;9a1fddc00362:36919 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T15:49:26,931 INFO [RS:2;9a1fddc00362:36919 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T15:49:26,931 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(3091): Received CLOSE for a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:26,932 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T15:49:26,933 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(959): stopping server 9a1fddc00362,36919,1731340161306 2024-11-11T15:49:26,933 INFO [RS:2;9a1fddc00362:36919 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T15:49:26,933 INFO [RS:2;9a1fddc00362:36919 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;9a1fddc00362:36919. 
2024-11-11T15:49:26,933 DEBUG [RS:2;9a1fddc00362:36919 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T15:49:26,933 DEBUG [RS:2;9a1fddc00362:36919 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:26,933 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T15:49:26,936 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T15:49:26,936 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T15:49:26,936 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,936 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T15:49:26,936 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,936 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,936 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a4919f76ae377776e837e79266e334bc, disabling compactions & flushes 2024-11-11T15:49:26,936 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T15:49:26,936 INFO [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 2024-11-11T15:49:26,937 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 2024-11-11T15:49:26,937 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, a4919f76ae377776e837e79266e334bc=TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc.} 2024-11-11T15:49:26,937 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 
after waiting 0 ms 2024-11-11T15:49:26,937 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 2024-11-11T15:49:26,937 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a4919f76ae377776e837e79266e334bc 2024-11-11T15:49:26,940 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,940 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,940 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T15:49:26,940 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T15:49:26,940 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T15:49:26,940 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T15:49:26,940 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,940 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T15:49:26,940 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,941 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,941 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,941 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-11T15:49:26,942 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:26,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741833_1009 (size=93) 2024-11-11T15:49:26,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741833_1009 (size=93) 2024-11-11T15:49:26,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741833_1009 (size=93) 2024-11-11T15:49:26,953 INFO [regionserver/9a1fddc00362:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:26,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741835_1011 (size=93) 2024-11-11T15:49:26,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741835_1011 (size=93) 2024-11-11T15:49:26,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741835_1011 (size=93) 2024-11-11T15:49:26,984 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/default/TestHBaseWalOnEC/a4919f76ae377776e837e79266e334bc/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-11T15:49:26,992 DEBUG [RS:0;9a1fddc00362:46573 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/oldWALs 2024-11-11T15:49:26,992 INFO [RS:0;9a1fddc00362:46573 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9a1fddc00362%2C46573%2C1731340161030:(num 1731340163503) 2024-11-11T15:49:26,992 DEBUG [RS:0;9a1fddc00362:46573 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:26,993 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:26,993 INFO [RS:0;9a1fddc00362:46573 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T15:49:26,994 INFO [RS:0;9a1fddc00362:46573 {}] hbase.ChoreService(370): Chore service for: regionserver/9a1fddc00362:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T15:49:26,994 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T15:49:26,994 INFO [regionserver/9a1fddc00362:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T15:49:26,994 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T15:49:26,994 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T15:49:26,994 INFO [RS:0;9a1fddc00362:46573 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T15:49:26,994 INFO [RS:0;9a1fddc00362:46573 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46573 2024-11-11T15:49:26,995 INFO [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 2024-11-11T15:49:26,995 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a4919f76ae377776e837e79266e334bc: Waiting for close lock at 1731340166936Running coprocessor pre-close hooks at 1731340166936Disabling compacts and flushes for region at 1731340166936Disabling writes for close at 1731340166937 (+1 ms)Writing region close event to WAL at 1731340166943 (+6 ms)Running coprocessor post-close hooks at 1731340166995 (+52 ms)Closed at 1731340166995 2024-11-11T15:49:26,995 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/.tmp/info/6508d5146f074d5b82bc8d08a6541f16 is 153, key is TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc./info:regioninfo/1731340165384/Put/seqid=0 2024-11-11T15:49:26,995 DEBUG [RS_CLOSE_REGION-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731340164555.a4919f76ae377776e837e79266e334bc. 
2024-11-11T15:49:26,999 INFO [RS:0;9a1fddc00362:46573 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T15:49:26,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T15:49:27,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9a1fddc00362,46573,1731340161030 2024-11-11T15:49:27,008 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9a1fddc00362,46573,1731340161030] 2024-11-11T15:49:27,011 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9a1fddc00362,46573,1731340161030 already deleted, retry=false 2024-11-11T15:49:27,011 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9a1fddc00362,46573,1731340161030 expired; onlineServers=2 2024-11-11T15:49:27,012 INFO [regionserver/9a1fddc00362:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:27,020 INFO [regionserver/9a1fddc00362:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:27,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741840_1016 (size=6637) 2024-11-11T15:49:27,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741840_1016 (size=6637) 2024-11-11T15:49:27,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741840_1016 (size=6637) 2024-11-11T15:49:27,063 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/.tmp/info/6508d5146f074d5b82bc8d08a6541f16 2024-11-11T15:49:27,108 INFO [RS:0;9a1fddc00362:46573 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T15:49:27,108 INFO [RS:0;9a1fddc00362:46573 {}] regionserver.HRegionServer(1031): Exiting; stopping=9a1fddc00362,46573,1731340161030; zookeeper connection closed. 
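The /hbase/rs/<server> znodes are ephemeral, so when the region server closes its ZooKeeper session the server removes the znode itself; that deletion is what RegionServerTracker reacts to above ("RegionServer ephemeral node deleted, processing expiration"). A self-contained sketch of the ephemeral-node behaviour, using the plain ZooKeeper API with a hypothetical path (not HBase's actual /hbase/rs layout):

```java
// Sketch (plain ZooKeeper API): an ephemeral znode is deleted by the server as soon as
// the creating session closes, which is why the master sees NodeDeleted on /hbase/rs/...
// right after the region server logs "Close zookeeper".
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper session = new ZooKeeper("127.0.0.1:51788", 30_000, event -> { });
    // Hypothetical path; HBase registers region servers under /hbase/rs/<host>,<port>,<startcode>.
    String path = session.create("/demo-ephemeral", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    System.out.println("created " + path);
    session.close(); // closing the session removes the ephemeral znode on the server side
  }
}
```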
2024-11-11T15:49:27,109 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e2ba182 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e2ba182 2024-11-11T15:49:27,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:27,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46573-0x1019805c26d0001, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:27,139 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T15:49:27,145 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/.tmp/ns/2c410380af4c480688f0ad470b87abf9 is 43, key is default/ns:d/1731340164379/Put/seqid=0 2024-11-11T15:49:27,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741841_1017 (size=5153) 2024-11-11T15:49:27,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741841_1017 (size=5153) 2024-11-11T15:49:27,209 INFO [regionserver/9a1fddc00362:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T15:49:27,209 INFO [regionserver/9a1fddc00362:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T15:49:27,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741841_1017 (size=5153) 2024-11-11T15:49:27,216 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/.tmp/ns/2c410380af4c480688f0ad470b87abf9 2024-11-11T15:49:27,286 INFO [regionserver/9a1fddc00362:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T15:49:27,286 INFO [regionserver/9a1fddc00362:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T15:49:27,330 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/.tmp/table/a881232bf67540958ab6d27e531ab2e8 is 52, key is TestHBaseWalOnEC/table:state/1731340165463/Put/seqid=0 2024-11-11T15:49:27,340 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T15:49:27,388 DEBUG [RS:1;9a1fddc00362:40339 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/oldWALs 2024-11-11T15:49:27,388 INFO [RS:1;9a1fddc00362:40339 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9a1fddc00362%2C40339%2C1731340161173:(num 1731340163421) 2024-11-11T15:49:27,388 DEBUG [RS:1;9a1fddc00362:40339 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-11T15:49:27,388 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:27,396 INFO [RS:1;9a1fddc00362:40339 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T15:49:27,396 INFO [RS:1;9a1fddc00362:40339 {}] hbase.ChoreService(370): Chore service for: regionserver/9a1fddc00362:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T15:49:27,396 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T15:49:27,397 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T15:49:27,397 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T15:49:27,397 INFO [RS:1;9a1fddc00362:40339 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T15:49:27,397 INFO [RS:1;9a1fddc00362:40339 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40339 2024-11-11T15:49:27,400 INFO [regionserver/9a1fddc00362:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T15:49:27,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T15:49:27,406 INFO [RS:1;9a1fddc00362:40339 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T15:49:27,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9a1fddc00362,40339,1731340161173 2024-11-11T15:49:27,411 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9a1fddc00362,40339,1731340161173] 2024-11-11T15:49:27,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741842_1018 (size=5249) 2024-11-11T15:49:27,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741842_1018 (size=5249) 2024-11-11T15:49:27,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741842_1018 (size=5249) 2024-11-11T15:49:27,428 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9a1fddc00362,40339,1731340161173 already deleted, retry=false 2024-11-11T15:49:27,428 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9a1fddc00362,40339,1731340161173 expired; onlineServers=1 2024-11-11T15:49:27,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:27,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40339-0x1019805c26d0002, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:27,513 INFO 
[RS:1;9a1fddc00362:40339 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T15:49:27,513 INFO [RS:1;9a1fddc00362:40339 {}] regionserver.HRegionServer(1031): Exiting; stopping=9a1fddc00362,40339,1731340161173; zookeeper connection closed. 2024-11-11T15:49:27,514 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@18792472 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@18792472 2024-11-11T15:49:27,540 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T15:49:27,741 DEBUG [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T15:49:27,817 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/.tmp/table/a881232bf67540958ab6d27e531ab2e8 2024-11-11T15:49:27,828 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/.tmp/info/6508d5146f074d5b82bc8d08a6541f16 as hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/info/6508d5146f074d5b82bc8d08a6541f16 2024-11-11T15:49:27,842 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/info/6508d5146f074d5b82bc8d08a6541f16, entries=10, sequenceid=11, filesize=6.5 K 2024-11-11T15:49:27,844 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/.tmp/ns/2c410380af4c480688f0ad470b87abf9 as hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/ns/2c410380af4c480688f0ad470b87abf9 2024-11-11T15:49:27,858 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/ns/2c410380af4c480688f0ad470b87abf9, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T15:49:27,868 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/.tmp/table/a881232bf67540958ab6d27e531ab2e8 as hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/table/a881232bf67540958ab6d27e531ab2e8 2024-11-11T15:49:27,882 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/table/a881232bf67540958ab6d27e531ab2e8, entries=2, sequenceid=11, filesize=5.1 K 2024-11-11T15:49:27,892 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 951ms, sequenceid=11, compaction requested=false 2024-11-11T15:49:27,932 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T15:49:27,936 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T15:49:27,936 INFO [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T15:49:27,937 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731340166940Running coprocessor pre-close hooks at 1731340166940Disabling compacts and flushes for region at 1731340166940Disabling writes for close at 1731340166940Obtaining lock to block concurrent updates at 1731340166941 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731340166941Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731340166942 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731340166945 (+3 ms)Flushing 1588230740/info: creating writer at 1731340166945Flushing 1588230740/info: appending metadata at 1731340166994 (+49 ms)Flushing 1588230740/info: closing flushed file at 1731340166994Flushing 1588230740/ns: creating writer at 1731340167100 (+106 ms)Flushing 1588230740/ns: appending metadata at 1731340167144 (+44 ms)Flushing 1588230740/ns: closing flushed file at 1731340167145 (+1 ms)Flushing 1588230740/table: creating writer at 1731340167274 (+129 ms)Flushing 1588230740/table: appending metadata at 1731340167328 (+54 ms)Flushing 1588230740/table: closing flushed file at 1731340167328Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e3b3872: reopening flushed file at 1731340167827 (+499 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@363399a8: reopening flushed file at 1731340167843 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b9ac55a: reopening flushed file at 1731340167859 (+16 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 951ms, sequenceid=11, compaction requested=false at 1731340167892 (+33 ms)Writing region close event to WAL at 1731340167905 (+13 ms)Running coprocessor post-close hooks at 1731340167936 (+31 ms)Closed at 1731340167936 2024-11-11T15:49:27,938 DEBUG [RS_CLOSE_META-regionserver/9a1fddc00362:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T15:49:27,941 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(976): stopping server 9a1fddc00362,36919,1731340161306; all regions closed. 
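The close journal above shows the usual flush path for hbase:meta: each memstore is written to an hfile under .tmp and then committed into its store before the region closes. The close path flushes automatically, but the same machinery can be driven explicitly from a client, for example in a test; a hedged sketch using the public Admin API (configuration source and usage are assumptions, not taken from this run):

```java
// Sketch only: explicitly flush hbase:meta through the client Admin API, exercising the
// same memstore -> .tmp hfile -> commit sequence recorded in the close journal above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMetaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes every store of hbase:meta, producing the "Flushed memstore ... to=.../.tmp/..."
      // followed by "Committing ... as ..." pattern seen in the log.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}
```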
2024-11-11T15:49:27,952 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:27,956 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:27,958 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:27,958 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:27,958 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:27,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741836_1012 (size=2751) 2024-11-11T15:49:27,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741836_1012 (size=2751) 2024-11-11T15:49:27,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741836_1012 (size=2751) 2024-11-11T15:49:28,018 DEBUG [RS:2;9a1fddc00362:36919 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/oldWALs 2024-11-11T15:49:28,018 INFO [RS:2;9a1fddc00362:36919 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9a1fddc00362%2C36919%2C1731340161306.meta:.meta(num 1731340163978) 2024-11-11T15:49:28,020 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,020 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,020 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,021 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,021 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741834_1010 (size=1298) 2024-11-11T15:49:28,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741834_1010 (size=1298) 2024-11-11T15:49:28,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741834_1010 (size=1298) 2024-11-11T15:49:28,055 DEBUG [RS:2;9a1fddc00362:36919 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/oldWALs 2024-11-11T15:49:28,055 INFO [RS:2;9a1fddc00362:36919 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9a1fddc00362%2C36919%2C1731340161306:(num 1731340163449) 2024-11-11T15:49:28,055 DEBUG [RS:2;9a1fddc00362:36919 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T15:49:28,055 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T15:49:28,055 INFO [RS:2;9a1fddc00362:36919 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T15:49:28,056 INFO [RS:2;9a1fddc00362:36919 {}] hbase.ChoreService(370): Chore service for: regionserver/9a1fddc00362:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T15:49:28,056 INFO [RS:2;9a1fddc00362:36919 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T15:49:28,056 INFO [RS:2;9a1fddc00362:36919 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36919 2024-11-11T15:49:28,056 INFO 
[regionserver/9a1fddc00362:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T15:49:28,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9a1fddc00362,36919,1731340161306 2024-11-11T15:49:28,065 INFO [RS:2;9a1fddc00362:36919 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T15:49:28,065 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$366/0x00007f50548f3fe0@6e026012 rejected from java.util.concurrent.ThreadPoolExecutor@2b00f3b6[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 13] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-11T15:49:28,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T15:49:28,069 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9a1fddc00362,36919,1731340161306] 2024-11-11T15:49:28,070 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9a1fddc00362,36919,1731340161306 already deleted, retry=false 2024-11-11T15:49:28,070 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9a1fddc00362,36919,1731340161306 expired; onlineServers=0 2024-11-11T15:49:28,070 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9a1fddc00362,40511,1731340160788' ***** 2024-11-11T15:49:28,070 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T15:49:28,070 INFO [M:0;9a1fddc00362:40511 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T15:49:28,071 INFO [M:0;9a1fddc00362:40511 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T15:49:28,071 DEBUG [M:0;9a1fddc00362:40511 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T15:49:28,071 DEBUG [M:0;9a1fddc00362:40511 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T15:49:28,071 DEBUG [master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.large.0-1731340162908 {}] cleaner.HFileCleaner(306): Exit Thread[master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.large.0-1731340162908,5,FailOnTimeoutGroup] 2024-11-11T15:49:28,071 DEBUG [master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.small.0-1731340162908 {}] cleaner.HFileCleaner(306): Exit 
Thread[master/9a1fddc00362:0:becomeActiveMaster-HFileCleaner.small.0-1731340162908,5,FailOnTimeoutGroup] 2024-11-11T15:49:28,071 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-11T15:49:28,071 INFO [M:0;9a1fddc00362:40511 {}] hbase.ChoreService(370): Chore service for: master/9a1fddc00362:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T15:49:28,072 INFO [M:0;9a1fddc00362:40511 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T15:49:28,072 DEBUG [M:0;9a1fddc00362:40511 {}] master.HMaster(1795): Stopping service threads 2024-11-11T15:49:28,072 INFO [M:0;9a1fddc00362:40511 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T15:49:28,072 INFO [M:0;9a1fddc00362:40511 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T15:49:28,073 INFO [M:0;9a1fddc00362:40511 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T15:49:28,073 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T15:49:28,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T15:49:28,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T15:49:28,074 DEBUG [M:0;9a1fddc00362:40511 {}] zookeeper.ZKUtil(347): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T15:49:28,074 WARN [M:0;9a1fddc00362:40511 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T15:49:28,080 INFO [M:0;9a1fddc00362:40511 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/.lastflushedseqids 2024-11-11T15:49:28,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741843_1019 (size=127) 2024-11-11T15:49:28,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741843_1019 (size=127) 2024-11-11T15:49:28,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741843_1019 (size=127) 2024-11-11T15:49:28,125 INFO [M:0;9a1fddc00362:40511 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T15:49:28,125 INFO [M:0;9a1fddc00362:40511 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T15:49:28,125 DEBUG [M:0;9a1fddc00362:40511 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T15:49:28,125 INFO [M:0;9a1fddc00362:40511 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:28,125 DEBUG [M:0;9a1fddc00362:40511 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:28,125 DEBUG [M:0;9a1fddc00362:40511 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T15:49:28,125 DEBUG [M:0;9a1fddc00362:40511 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:28,125 INFO [M:0;9a1fddc00362:40511 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-11T15:49:28,168 INFO [RS:2;9a1fddc00362:36919 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T15:49:28,168 INFO [RS:2;9a1fddc00362:36919 {}] regionserver.HRegionServer(1031): Exiting; stopping=9a1fddc00362,36919,1731340161306; zookeeper connection closed. 2024-11-11T15:49:28,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:28,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x1019805c26d0003, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T15:49:28,176 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4703b6d3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4703b6d3 2024-11-11T15:49:28,177 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-11T15:49:28,185 DEBUG [M:0;9a1fddc00362:40511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc168991501448efa16d9a792e4d4f28 is 82, key is hbase:meta,,1/info:regioninfo/1731340164302/Put/seqid=0 2024-11-11T15:49:28,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741844_1020 (size=5672) 2024-11-11T15:49:28,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741844_1020 (size=5672) 2024-11-11T15:49:28,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741844_1020 (size=5672) 2024-11-11T15:49:28,613 INFO [M:0;9a1fddc00362:40511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc168991501448efa16d9a792e4d4f28 2024-11-11T15:49:28,649 DEBUG [M:0;9a1fddc00362:40511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a0de844aadf4157af1cec8359f72b69 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731340165478/Put/seqid=0 2024-11-11T15:49:28,684 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741845_1021 (size=6440) 2024-11-11T15:49:28,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741845_1021 (size=6440) 2024-11-11T15:49:28,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741845_1021 (size=6440) 2024-11-11T15:49:28,689 INFO [M:0;9a1fddc00362:40511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a0de844aadf4157af1cec8359f72b69 2024-11-11T15:49:28,724 DEBUG [M:0;9a1fddc00362:40511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/25db44b9b1ef46deb5b0cea567aab086 is 69, key is 9a1fddc00362,36919,1731340161306/rs:state/1731340163049/Put/seqid=0 2024-11-11T15:49:28,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741846_1022 (size=5294) 2024-11-11T15:49:28,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741846_1022 (size=5294) 2024-11-11T15:49:28,758 INFO [M:0;9a1fddc00362:40511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/25db44b9b1ef46deb5b0cea567aab086 2024-11-11T15:49:28,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741846_1022 (size=5294) 2024-11-11T15:49:28,769 DEBUG [M:0;9a1fddc00362:40511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc168991501448efa16d9a792e4d4f28 as hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc168991501448efa16d9a792e4d4f28 2024-11-11T15:49:28,782 INFO [M:0;9a1fddc00362:40511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc168991501448efa16d9a792e4d4f28, entries=8, sequenceid=72, filesize=5.5 K 2024-11-11T15:49:28,788 DEBUG [M:0;9a1fddc00362:40511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a0de844aadf4157af1cec8359f72b69 as hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0a0de844aadf4157af1cec8359f72b69 2024-11-11T15:49:28,804 INFO [M:0;9a1fddc00362:40511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0a0de844aadf4157af1cec8359f72b69, entries=8, sequenceid=72, filesize=6.3 K 2024-11-11T15:49:28,817 DEBUG [M:0;9a1fddc00362:40511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/25db44b9b1ef46deb5b0cea567aab086 as hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/25db44b9b1ef46deb5b0cea567aab086 2024-11-11T15:49:28,845 INFO [M:0;9a1fddc00362:40511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35711/user/jenkins/test-data/242e11af-83cf-422f-df3d-ad4788a813c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/25db44b9b1ef46deb5b0cea567aab086, entries=3, sequenceid=72, filesize=5.2 K 2024-11-11T15:49:28,854 INFO [M:0;9a1fddc00362:40511 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 729ms, sequenceid=72, compaction requested=false 2024-11-11T15:49:28,860 INFO [M:0;9a1fddc00362:40511 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T15:49:28,861 DEBUG [M:0;9a1fddc00362:40511 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731340168125Disabling compacts and flushes for region at 1731340168125Disabling writes for close at 1731340168125Obtaining lock to block concurrent updates at 1731340168126 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731340168126Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731340168126Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731340168132 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731340168133 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731340168184 (+51 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731340168185 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731340168623 (+438 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731340168647 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731340168647Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731340168700 (+53 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731340168723 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731340168723Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e23aa70: reopening flushed file at 1731340168767 (+44 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20fae457: reopening flushed file at 1731340168783 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@535d3565: reopening flushed file at 1731340168805 (+22 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 729ms, sequenceid=72, compaction requested=false at 1731340168854 (+49 ms)Writing region close event to WAL at 1731340168860 (+6 ms)Closed at 1731340168860 2024-11-11T15:49:28,868 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,868 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,868 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,871 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,872 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T15:49:28,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45875 is added to blk_1073741830_1006 (size=32683) 2024-11-11T15:49:28,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40391 is added to blk_1073741830_1006 (size=32683) 2024-11-11T15:49:28,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36483 is added to blk_1073741830_1006 (size=32683) 2024-11-11T15:49:28,882 INFO [M:0;9a1fddc00362:40511 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T15:49:28,882 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
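The ERROR logged earlier during region server shutdown ("Error while calling watcher. java.util.concurrent.RejectedExecutionException ... rejected from ... [Terminated ...]") is standard JDK executor behaviour: the ZooKeeper event thread delivered one last event after the ZKWatcher's internal executor had already been shut down, so the submission was rejected. A JDK-only sketch of that failure mode (names are illustrative, not HBase's):

```java
// Sketch: once an executor has been shut down (here it was already Terminated), any
// further task submission is rejected with a RejectedExecutionException, which is what
// the ZooKeeper event thread ran into while dispatching a late event to the watcher.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class RejectedAfterShutdownSketch {
  public static void main(String[] args) {
    ExecutorService eventExecutor = Executors.newSingleThreadExecutor();
    eventExecutor.shutdown(); // analogous to the watcher's executor being stopped first
    try {
      eventExecutor.execute(() -> System.out.println("late ZooKeeper event"));
    } catch (RejectedExecutionException e) {
      // The real code logs this as "Error while calling watcher." and continues shutdown.
      System.out.println("rejected: " + e);
    }
  }
}
```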
2024-11-11T15:49:28,882 INFO [M:0;9a1fddc00362:40511 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40511
2024-11-11T15:49:28,882 INFO [M:0;9a1fddc00362:40511 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-11T15:49:29,070 INFO [M:0;9a1fddc00362:40511 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-11T15:49:29,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T15:49:29,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40511-0x1019805c26d0000, quorum=127.0.0.1:51788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T15:49:29,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2440b4fe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T15:49:29,115 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20272bbf{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T15:49:29,115 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T15:49:29,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c9ea9ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T15:49:29,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61b58ba7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.log.dir/,STOPPED}
2024-11-11T15:49:29,123 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T15:49:29,123 WARN [BP-1366544186-172.17.0.3-1731340158641 heartbeating to localhost/127.0.0.1:35711 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T15:49:29,123 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T15:49:29,123 WARN [BP-1366544186-172.17.0.3-1731340158641 heartbeating to localhost/127.0.0.1:35711 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1366544186-172.17.0.3-1731340158641 (Datanode Uuid 8975f652-06a0-46ad-8a5e-20c024604f57) service to localhost/127.0.0.1:35711
2024-11-11T15:49:29,124 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data5/current/BP-1366544186-172.17.0.3-1731340158641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T15:49:29,125 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T15:49:29,125 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data6/current/BP-1366544186-172.17.0.3-1731340158641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T15:49:29,225 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25822ceb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T15:49:29,227 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@645c4148{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T15:49:29,228 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T15:49:29,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39eb5e5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T15:49:29,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f68c25e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.log.dir/,STOPPED}
2024-11-11T15:49:29,233 WARN [BP-1366544186-172.17.0.3-1731340158641 heartbeating to localhost/127.0.0.1:35711 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T15:49:29,245 WARN [BP-1366544186-172.17.0.3-1731340158641 heartbeating to localhost/127.0.0.1:35711 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1366544186-172.17.0.3-1731340158641 (Datanode Uuid ce923c52-f9f9-4f1e-b5c3-996b8c246eba) service to localhost/127.0.0.1:35711
2024-11-11T15:49:29,236 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T15:49:29,246 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T15:49:29,247 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data3/current/BP-1366544186-172.17.0.3-1731340158641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T15:49:29,247 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data4/current/BP-1366544186-172.17.0.3-1731340158641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T15:49:29,247 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T15:49:29,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3907d0a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T15:49:29,340 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@95e5bc7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T15:49:29,340 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T15:49:29,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c8150f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T15:49:29,341 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5810c87d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.log.dir/,STOPPED}
2024-11-11T15:49:29,361 WARN [BP-1366544186-172.17.0.3-1731340158641 heartbeating to localhost/127.0.0.1:35711 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T15:49:29,361 WARN [BP-1366544186-172.17.0.3-1731340158641 heartbeating to localhost/127.0.0.1:35711 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1366544186-172.17.0.3-1731340158641 (Datanode Uuid 86ad93e0-a3f2-493b-bc93-84c5156c1249) service to localhost/127.0.0.1:35711
2024-11-11T15:49:29,361 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T15:49:29,362 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T15:49:29,362 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data2/current/BP-1366544186-172.17.0.3-1731340158641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T15:49:29,363 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T15:49:29,364 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/cluster_a1db2f04-db68-0f0c-4920-4e2fafdbd946/data/data1/current/BP-1366544186-172.17.0.3-1731340158641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T15:49:29,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b0aa43d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T15:49:29,428 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@44402c27{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T15:49:29,428 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T15:49:29,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69d5fb76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T15:49:29,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19064eca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8dfc13ee-a24a-8e54-e5c6-b177d8321526/hadoop.log.dir/,STOPPED}
2024-11-11T15:49:29,457 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-11T15:49:29,624 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-11T15:49:29,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T15:49:29,670 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T15:49:29,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T15:49:29,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T15:49:29,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T15:49:29,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T15:49:30,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-11T15:49:30,130 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=136 (was 85) - Thread LEAK? -, OpenFileDescriptor=511 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1170 (was 1174), ProcessCount=11 (was 11), AvailableMemoryMB=2826 (was 2207) - AvailableMemoryMB LEAK? -