2024-11-09 06:42:12,667 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 06:42:12,678 main DEBUG Took 0.009638 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-09 06:42:12,678 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-09 06:42:12,679 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-09 06:42:12,680 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-09 06:42:12,681 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,690 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-09 06:42:12,707 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,709 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,709 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,710 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,710 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,710 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,711 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,711 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,712 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,712 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,713 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,713 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,713 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,714 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-09 06:42:12,714 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,714 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,715 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,715 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,715 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,716 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,716 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,716 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,717 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,717 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 06:42:12,717 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,718 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-09 06:42:12,719 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 06:42:12,720 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-09 06:42:12,722 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-09 06:42:12,722 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-09 06:42:12,723 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-09 06:42:12,724 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-09 06:42:12,732 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-09 06:42:12,735 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-09 06:42:12,736 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-09 06:42:12,736 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-09 06:42:12,737 main DEBUG createAppenders(={Console}) 2024-11-09 06:42:12,738 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-09 06:42:12,738 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 06:42:12,738 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-09 06:42:12,738 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-09 06:42:12,739 main DEBUG OutputStream closed 2024-11-09 06:42:12,739 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-09 06:42:12,739 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-09 06:42:12,739 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-09 06:42:12,803 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-09 06:42:12,805 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-09 06:42:12,806 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-09 06:42:12,807 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-09 06:42:12,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-09 06:42:12,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-09 06:42:12,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-09 06:42:12,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-09 06:42:12,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-09 06:42:12,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-09 06:42:12,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-09 06:42:12,809 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-09 06:42:12,810 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-09 06:42:12,810 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-09 06:42:12,810 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-09 06:42:12,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-09 06:42:12,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-09 06:42:12,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-09 06:42:12,814 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-09 06:42:12,814 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-09 06:42:12,815 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-09 06:42:12,815 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-09T06:42:12,831 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-09 06:42:12,834 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-09 06:42:12,834 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-09T06:42:13,112 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a 2024-11-09T06:42:13,138 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e, deleteOnExit=true 2024-11-09T06:42:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/test.cache.data in system properties and HBase conf 2024-11-09T06:42:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T06:42:13,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir in system properties and HBase conf 2024-11-09T06:42:13,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T06:42:13,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T06:42:13,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T06:42:13,239 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-09T06:42:13,324 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T06:42:13,328 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T06:42:13,328 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T06:42:13,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T06:42:13,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T06:42:13,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T06:42:13,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T06:42:13,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T06:42:13,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T06:42:13,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T06:42:13,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/nfs.dump.dir in system properties and HBase conf 2024-11-09T06:42:13,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/java.io.tmpdir in system properties and HBase conf 2024-11-09T06:42:13,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T06:42:13,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T06:42:13,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T06:42:14,152 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-09T06:42:14,226 INFO [Time-limited test {}] log.Log(170): Logging initialized @2208ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-09T06:42:14,300 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:14,361 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T06:42:14,381 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T06:42:14,381 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T06:42:14,382 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T06:42:14,394 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:14,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir/,AVAILABLE} 2024-11-09T06:42:14,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T06:42:14,587 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58dbf239{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/java.io.tmpdir/jetty-localhost-42101-hadoop-hdfs-3_4_1-tests_jar-_-any-14850801687432201998/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T06:42:14,594 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:42101} 2024-11-09T06:42:14,594 INFO [Time-limited test {}] server.Server(415): Started @2577ms 2024-11-09T06:42:14,978 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:14,985 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T06:42:14,986 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T06:42:14,987 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T06:42:14,987 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T06:42:14,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir/,AVAILABLE} 2024-11-09T06:42:14,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T06:42:15,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65462677{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/java.io.tmpdir/jetty-localhost-35391-hadoop-hdfs-3_4_1-tests_jar-_-any-17635579159207410186/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:15,111 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:35391} 2024-11-09T06:42:15,111 INFO [Time-limited test {}] server.Server(415): Started @3093ms 2024-11-09T06:42:15,167 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T06:42:15,288 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:15,295 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T06:42:15,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T06:42:15,297 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T06:42:15,297 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T06:42:15,298 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir/,AVAILABLE} 2024-11-09T06:42:15,299 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T06:42:15,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@513cab2c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/java.io.tmpdir/jetty-localhost-42137-hadoop-hdfs-3_4_1-tests_jar-_-any-2945176433685134469/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:15,433 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:42137} 2024-11-09T06:42:15,433 INFO [Time-limited test {}] server.Server(415): Started @3416ms 2024-11-09T06:42:15,437 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T06:42:15,482 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:15,487 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T06:42:15,488 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T06:42:15,488 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T06:42:15,489 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T06:42:15,490 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir/,AVAILABLE} 2024-11-09T06:42:15,491 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T06:42:15,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@653e6301{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/java.io.tmpdir/jetty-localhost-45339-hadoop-hdfs-3_4_1-tests_jar-_-any-12279428970027097985/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:15,615 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@404caff2{HTTP/1.1, (http/1.1)}{localhost:45339} 2024-11-09T06:42:15,616 INFO [Time-limited test {}] server.Server(415): Started @3598ms 2024-11-09T06:42:15,618 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-09T06:42:15,643 WARN [Thread-111 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data2/current/BP-1470512121-172.17.0.2-1731134533905/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:15,643 WARN [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data1/current/BP-1470512121-172.17.0.2-1731134533905/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:15,643 WARN [Thread-112 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data4/current/BP-1470512121-172.17.0.2-1731134533905/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:15,643 WARN [Thread-110 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data3/current/BP-1470512121-172.17.0.2-1731134533905/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:15,695 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T06:42:15,697 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T06:42:15,770 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data5/current/BP-1470512121-172.17.0.2-1731134533905/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:15,771 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data6/current/BP-1470512121-172.17.0.2-1731134533905/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:15,788 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x382ff98781e04af3 with lease ID 0xdc5011245acd3bd1: Processing first storage report for DS-0c8ba812-e569-4051-b4dc-e1c8681bf36e from datanode DatanodeRegistration(127.0.0.1:42479, datanodeUuid=cb3436cc-d260-491e-b58a-699d1e26543e, infoPort=37551, infoSecurePort=0, ipcPort=40421, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905) 2024-11-09T06:42:15,790 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x382ff98781e04af3 with lease ID 0xdc5011245acd3bd1: from storage DS-0c8ba812-e569-4051-b4dc-e1c8681bf36e node DatanodeRegistration(127.0.0.1:42479, datanodeUuid=cb3436cc-d260-491e-b58a-699d1e26543e, infoPort=37551, infoSecurePort=0, ipcPort=40421, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-09T06:42:15,790 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20e1a5e056c0e86c with lease ID 0xdc5011245acd3bd2: Processing first storage report for DS-5540a4b8-7160-41f6-ab1e-3dfa431f36fb from datanode DatanodeRegistration(127.0.0.1:42651, datanodeUuid=aa781535-0273-45ba-857a-580b3a60ac8b, infoPort=39269, infoSecurePort=0, ipcPort=33209, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905) 2024-11-09T06:42:15,791 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20e1a5e056c0e86c with lease ID 0xdc5011245acd3bd2: from storage DS-5540a4b8-7160-41f6-ab1e-3dfa431f36fb node DatanodeRegistration(127.0.0.1:42651, datanodeUuid=aa781535-0273-45ba-857a-580b3a60ac8b, infoPort=39269, infoSecurePort=0, ipcPort=33209, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T06:42:15,791 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20e1a5e056c0e86c with lease ID 0xdc5011245acd3bd2: Processing first storage report for DS-d2f4fa85-f98f-4a2a-a5d7-a5ff4d1b23d3 from datanode DatanodeRegistration(127.0.0.1:42651, datanodeUuid=aa781535-0273-45ba-857a-580b3a60ac8b, infoPort=39269, infoSecurePort=0, ipcPort=33209, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905) 2024-11-09T06:42:15,791 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20e1a5e056c0e86c with lease ID 0xdc5011245acd3bd2: from storage DS-d2f4fa85-f98f-4a2a-a5d7-a5ff4d1b23d3 node DatanodeRegistration(127.0.0.1:42651, datanodeUuid=aa781535-0273-45ba-857a-580b3a60ac8b, 
infoPort=39269, infoSecurePort=0, ipcPort=33209, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T06:42:15,791 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x382ff98781e04af3 with lease ID 0xdc5011245acd3bd1: Processing first storage report for DS-1095c516-893d-495c-8d98-a4c8fc46efcc from datanode DatanodeRegistration(127.0.0.1:42479, datanodeUuid=cb3436cc-d260-491e-b58a-699d1e26543e, infoPort=37551, infoSecurePort=0, ipcPort=40421, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905) 2024-11-09T06:42:15,792 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x382ff98781e04af3 with lease ID 0xdc5011245acd3bd1: from storage DS-1095c516-893d-495c-8d98-a4c8fc46efcc node DatanodeRegistration(127.0.0.1:42479, datanodeUuid=cb3436cc-d260-491e-b58a-699d1e26543e, infoPort=37551, infoSecurePort=0, ipcPort=40421, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T06:42:15,807 WARN [Thread-115 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T06:42:15,814 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x56fa2ea321acdeb2 with lease ID 0xdc5011245acd3bd3: Processing first storage report for DS-1a6d5c85-8e6c-45bd-ad2f-22e086241cbc from datanode DatanodeRegistration(127.0.0.1:34195, datanodeUuid=af188928-3c7c-410c-a532-38d36424e839, infoPort=42641, infoSecurePort=0, ipcPort=43565, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905) 2024-11-09T06:42:15,814 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56fa2ea321acdeb2 with lease ID 0xdc5011245acd3bd3: from storage DS-1a6d5c85-8e6c-45bd-ad2f-22e086241cbc node DatanodeRegistration(127.0.0.1:34195, datanodeUuid=af188928-3c7c-410c-a532-38d36424e839, infoPort=42641, infoSecurePort=0, ipcPort=43565, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T06:42:15,814 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x56fa2ea321acdeb2 with lease ID 0xdc5011245acd3bd3: Processing first storage report for DS-f5aa4b7d-5c14-47e4-9730-1791b9ac8073 from datanode DatanodeRegistration(127.0.0.1:34195, datanodeUuid=af188928-3c7c-410c-a532-38d36424e839, infoPort=42641, infoSecurePort=0, ipcPort=43565, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905) 2024-11-09T06:42:15,814 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56fa2ea321acdeb2 with lease ID 0xdc5011245acd3bd3: from storage DS-f5aa4b7d-5c14-47e4-9730-1791b9ac8073 node DatanodeRegistration(127.0.0.1:34195, datanodeUuid=af188928-3c7c-410c-a532-38d36424e839, infoPort=42641, infoSecurePort=0, ipcPort=43565, storageInfo=lv=-57;cid=testClusterID;nsid=991781824;c=1731134533905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T06:42:16,004 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a 2024-11-09T06:42:16,081 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-11-09T06:42:16,138 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=158, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=24, ProcessCount=11, AvailableMemoryMB=2897 2024-11-09T06:42:16,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T06:42:16,150 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-09T06:42:16,224 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/zookeeper_0, clientPort=60610, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T06:42:16,235 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60610 2024-11-09T06:42:16,245 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:16,248 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:16,350 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:16,350 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T06:42:16,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:56094 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56094 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:16,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-09T06:42:16,812 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T06:42:16,821 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a with version=8 2024-11-09T06:42:16,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/hbase-staging 2024-11-09T06:42:16,914 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-09T06:42:17,161 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e09398052c91:0 server-side Connection retries=45 2024-11-09T06:42:17,172 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,173 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,177 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T06:42:17,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T06:42:17,313 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T06:42:17,376 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-09T06:42:17,385 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-09T06:42:17,389 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T06:42:17,417 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 9551 (auto-detected) 2024-11-09T06:42:17,418 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-09T06:42:17,436 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33407 2024-11-09T06:42:17,458 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33407 connecting to ZooKeeper ensemble=127.0.0.1:60610 2024-11-09T06:42:17,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:334070x0, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T06:42:17,492 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33407-0x100fb76a49e0000 connected 2024-11-09T06:42:17,517 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:17,520 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:17,529 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:17,533 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a, hbase.cluster.distributed=false 2024-11-09T06:42:17,555 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T06:42:17,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33407 2024-11-09T06:42:17,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33407 2024-11-09T06:42:17,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33407 2024-11-09T06:42:17,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33407 2024-11-09T06:42:17,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33407 2024-11-09T06:42:17,686 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e09398052c91:0 server-side Connection retries=45 2024-11-09T06:42:17,688 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,688 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,688 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T06:42:17,689 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,689 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T06:42:17,692 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T06:42:17,694 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T06:42:17,695 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44301 2024-11-09T06:42:17,697 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44301 connecting to ZooKeeper ensemble=127.0.0.1:60610 2024-11-09T06:42:17,698 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:17,702 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:17,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:443010x0, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T06:42:17,710 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:443010x0, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:17,710 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44301-0x100fb76a49e0001 connected 2024-11-09T06:42:17,715 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T06:42:17,723 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T06:42:17,726 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T06:42:17,731 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T06:42:17,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44301 2024-11-09T06:42:17,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44301 2024-11-09T06:42:17,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44301 2024-11-09T06:42:17,734 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44301 2024-11-09T06:42:17,734 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44301 2024-11-09T06:42:17,752 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e09398052c91:0 server-side Connection retries=45 2024-11-09T06:42:17,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,753 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T06:42:17,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T06:42:17,754 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T06:42:17,754 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T06:42:17,755 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44035 2024-11-09T06:42:17,757 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44035 connecting to ZooKeeper ensemble=127.0.0.1:60610 2024-11-09T06:42:17,758 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:17,761 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:17,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440350x0, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T06:42:17,769 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:440350x0, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:17,769 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44035-0x100fb76a49e0002 connected 2024-11-09T06:42:17,770 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T06:42:17,771 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T06:42:17,772 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T06:42:17,774 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T06:42:17,778 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44035 2024-11-09T06:42:17,779 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44035 2024-11-09T06:42:17,779 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44035 2024-11-09T06:42:17,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44035 2024-11-09T06:42:17,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44035 2024-11-09T06:42:17,800 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e09398052c91:0 server-side Connection retries=45 2024-11-09T06:42:17,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,800 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T06:42:17,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:17,801 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T06:42:17,801 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T06:42:17,801 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T06:42:17,802 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41261 2024-11-09T06:42:17,803 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41261 connecting to ZooKeeper ensemble=127.0.0.1:60610 2024-11-09T06:42:17,805 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:17,807 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:17,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412610x0, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T06:42:17,812 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412610x0, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:17,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41261-0x100fb76a49e0003 connected 2024-11-09T06:42:17,813 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T06:42:17,814 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T06:42:17,815 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T06:42:17,817 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T06:42:17,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41261 2024-11-09T06:42:17,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41261 2024-11-09T06:42:17,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41261 2024-11-09T06:42:17,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41261 2024-11-09T06:42:17,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41261 2024-11-09T06:42:17,842 DEBUG [M:0;e09398052c91:33407 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e09398052c91:33407 2024-11-09T06:42:17,843 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e09398052c91,33407,1731134536965 2024-11-09T06:42:17,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:17,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:17,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:17,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:17,854 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e09398052c91,33407,1731134536965 2024-11-09T06:42:17,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T06:42:17,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T06:42:17,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:17,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, 
quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:17,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:17,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T06:42:17,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:17,886 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T06:42:17,888 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e09398052c91,33407,1731134536965 from backup master directory 2024-11-09T06:42:17,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e09398052c91,33407,1731134536965 2024-11-09T06:42:17,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:17,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:17,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:17,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:17,892 WARN [master/e09398052c91:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
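The watcher choreography in the lines above — ZKUtil setting a watch on /hbase/master while the znode does not yet exist, then every ZKWatcher receiving a NodeCreated event for /hbase/master once the active master registers — is the plain ZooKeeper exists-watch pattern. A minimal sketch against the stock ZooKeeper client (not HBase's ZKWatcher); the connect string, session timeout, and path are illustrative placeholders:

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ExistsWatchSketch {
      public static void main(String[] args) throws Exception {
        // Assumes a reachable ensemble; the test above happens to use 127.0.0.1:60610.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> {});
        Watcher masterWatcher = event ->
            System.out.println("event=" + event.getType() + " path=" + event.getPath());
        // exists() registers the watch even when the znode is absent, so a later
        // create of /hbase/master fires NodeCreated on masterWatcher.
        if (zk.exists("/hbase/master", masterWatcher) == null) {
          System.out.println("znode not there yet; watch left in place");
        }
        zk.close();
      }
    }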
2024-11-09T06:42:17,892 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e09398052c91,33407,1731134536965 2024-11-09T06:42:17,894 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-09T06:42:17,896 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-09T06:42:17,965 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/hbase.id] with ID: 29bb78d3-fda1-488f-bf1a-bbf5a84a5c0c 2024-11-09T06:42:17,965 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/.tmp/hbase.id 2024-11-09T06:42:17,972 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:17,972 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:17,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:50248 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:42651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50248 dst: /127.0.0.1:42651 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-09T06:42:17,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-09T06:42:17,982 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:17,982 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/.tmp/hbase.id]:[hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/hbase.id] 2024-11-09T06:42:18,026 INFO [master/e09398052c91:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:18,030 INFO [master/e09398052c91:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T06:42:18,049 INFO [master/e09398052c91:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-09T06:42:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,064 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,064 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,067 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:49970 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:34195:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49970 dst: /127.0.0.1:34195 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:18,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-09T06:42:18,073 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:18,089 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T06:42:18,091 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T06:42:18,098 INFO [master/e09398052c91:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T06:42:18,127 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,127 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,131 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:49986 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:34195:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49986 dst: /127.0.0.1:34195 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:18,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-09T06:42:18,138 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
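The recurring DFSStripedOutputStream warnings come down to arithmetic: an RS-3-2-1024k block group has 3 data cells plus 2 parity cells, so a full write wants 5 distinct datanodes, and the warnings name exactly the two parity indexes (3 and 4) that cannot be placed. A small sketch of that count; the live-datanode figure of 3 is inferred from the three datanode addresses appearing in this log, not stated anywhere directly:

    public class EcPlacementSketch {
      public static void main(String[] args) {
        int dataUnits = 3;       // "RS-3-2-1024k": three data units per block group
        int parityUnits = 2;     // two parity units, indexes 3 and 4 in the warnings
        int liveDataNodes = 3;   // assumption based on the datanode ports seen in this log
        int groupWidth = dataUnits + parityUnits;                  // 5 nodes wanted
        int unplaceable = Math.max(0, groupWidth - liveDataNodes); // 2 parity blocks left out
        System.out.printf("group width=%d, live datanodes=%d, unplaceable blocks=%d%n",
            groupWidth, liveDataNodes, unplaceable);
      }
    }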
2024-11-09T06:42:18,156 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store 2024-11-09T06:42:18,173 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,174 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,177 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:56124 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56124 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:18,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-09T06:42:18,182 WARN [master/e09398052c91:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:18,187 INFO [master/e09398052c91:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-09T06:42:18,190 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:18,191 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T06:42:18,191 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:18,191 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:18,193 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T06:42:18,193 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:18,193 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
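The StoreHotnessProtector line above names the switch that would turn the protector on. A minimal, hypothetical sketch of setting that property on an HBase Configuration; the property name is copied from the log message, while the value 10 is an arbitrary example rather than a recommendation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HotnessProtectorConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 0 or below leaves the protector disabled, as in the log above.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        System.out.println("parallel put limit = "
            + conf.getInt("hbase.region.store.parallel.put.limit", 0));
      }
    }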
2024-11-09T06:42:18,194 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731134538191Disabling compacts and flushes for region at 1731134538191Disabling writes for close at 1731134538193 (+2 ms)Writing region close event to WAL at 1731134538193Closed at 1731134538193 2024-11-09T06:42:18,196 WARN [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/.initializing 2024-11-09T06:42:18,196 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/WALs/e09398052c91,33407,1731134536965 2024-11-09T06:42:18,205 INFO [master/e09398052c91:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T06:42:18,221 INFO [master/e09398052c91:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C33407%2C1731134536965, suffix=, logDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/WALs/e09398052c91,33407,1731134536965, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/oldWALs, maxLogs=10 2024-11-09T06:42:18,260 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/WALs/e09398052c91,33407,1731134536965/e09398052c91%2C33407%2C1731134536965.1731134538227, exclude list is [], retry=0 2024-11-09T06:42:18,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:18,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42651,DS-5540a4b8-7160-41f6-ab1e-3dfa431f36fb,DISK] 2024-11-09T06:42:18,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42479,DS-0c8ba812-e569-4051-b4dc-e1c8681bf36e,DISK] 2024-11-09T06:42:18,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34195,DS-1a6d5c85-8e6c-45bd-ad2f-22e086241cbc,DISK] 2024-11-09T06:42:18,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-09T06:42:18,324 INFO [master/e09398052c91:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/WALs/e09398052c91,33407,1731134536965/e09398052c91%2C33407%2C1731134536965.1731134538227 2024-11-09T06:42:18,324 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39269:39269),(127.0.0.1/127.0.0.1:42641:42641),(127.0.0.1/127.0.0.1:37551:37551)] 2024-11-09T06:42:18,325 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T06:42:18,325 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:18,328 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,329 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,366 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,392 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T06:42:18,395 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:18,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:18,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,401 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T06:42:18,401 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:18,402 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T06:42:18,402 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,405 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T06:42:18,405 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:18,406 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T06:42:18,406 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,408 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T06:42:18,408 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:18,409 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T06:42:18,409 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,412 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,414 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,419 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,419 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,423 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T06:42:18,426 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:18,431 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T06:42:18,432 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71982893, jitterRate=0.0726286917924881}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T06:42:18,438 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731134538342Initializing all the Stores at 1731134538344 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134538344Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134538345 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134538345Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134538345Cleaning up temporary data from old regions at 1731134538419 (+74 ms)Region opened successfully at 1731134538438 (+19 ms) 2024-11-09T06:42:18,439 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T06:42:18,471 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305e630f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e09398052c91/172.17.0.2:0 2024-11-09T06:42:18,502 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T06:42:18,513 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T06:42:18,513 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T06:42:18,516 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T06:42:18,517 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-09T06:42:18,522 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-09T06:42:18,522 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T06:42:18,546 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-09T06:42:18,557 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T06:42:18,559 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T06:42:18,562 INFO [master/e09398052c91:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T06:42:18,564 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T06:42:18,566 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T06:42:18,568 INFO [master/e09398052c91:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T06:42:18,573 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T06:42:18,574 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T06:42:18,576 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T06:42:18,577 DEBUG [master/e09398052c91:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T06:42:18,600 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T06:42:18,603 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T06:42:18,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:18,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:18,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,611 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e09398052c91,33407,1731134536965, sessionid=0x100fb76a49e0000, setting cluster-up flag (Was=false) 2024-11-09T06:42:18,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-09T06:42:18,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,631 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T06:42:18,633 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e09398052c91,33407,1731134536965 2024-11-09T06:42:18,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:18,645 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T06:42:18,646 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e09398052c91,33407,1731134536965 2024-11-09T06:42:18,652 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T06:42:18,723 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T06:42:18,732 INFO [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(746): ClusterId : 29bb78d3-fda1-488f-bf1a-bbf5a84a5c0c 2024-11-09T06:42:18,732 INFO [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(746): ClusterId : 29bb78d3-fda1-488f-bf1a-bbf5a84a5c0c 2024-11-09T06:42:18,732 INFO [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(746): ClusterId : 29bb78d3-fda1-488f-bf1a-bbf5a84a5c0c 2024-11-09T06:42:18,734 INFO [master/e09398052c91:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T06:42:18,735 DEBUG [RS:1;e09398052c91:44035 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T06:42:18,735 DEBUG [RS:0;e09398052c91:44301 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T06:42:18,735 DEBUG [RS:2;e09398052c91:41261 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T06:42:18,741 INFO [master/e09398052c91:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-09T06:42:18,746 DEBUG [RS:1;e09398052c91:44035 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T06:42:18,746 DEBUG [RS:0;e09398052c91:44301 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T06:42:18,746 DEBUG [RS:0;e09398052c91:44301 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T06:42:18,746 DEBUG [RS:1;e09398052c91:44035 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T06:42:18,748 DEBUG [RS:2;e09398052c91:41261 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T06:42:18,749 DEBUG [RS:2;e09398052c91:41261 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T06:42:18,747 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e09398052c91,33407,1731134536965 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T06:42:18,752 DEBUG [RS:1;e09398052c91:44035 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T06:42:18,752 DEBUG [RS:0;e09398052c91:44301 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T06:42:18,752 DEBUG [RS:2;e09398052c91:41261 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T06:42:18,752 DEBUG [RS:1;e09398052c91:44035 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21123f41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e09398052c91/172.17.0.2:0 2024-11-09T06:42:18,752 DEBUG [RS:0;e09398052c91:44301 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2109a9d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e09398052c91/172.17.0.2:0 2024-11-09T06:42:18,753 DEBUG [RS:2;e09398052c91:41261 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52a894e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e09398052c91/172.17.0.2:0 
2024-11-09T06:42:18,761 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e09398052c91:0, corePoolSize=5, maxPoolSize=5 2024-11-09T06:42:18,762 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e09398052c91:0, corePoolSize=5, maxPoolSize=5 2024-11-09T06:42:18,762 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e09398052c91:0, corePoolSize=5, maxPoolSize=5 2024-11-09T06:42:18,762 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e09398052c91:0, corePoolSize=5, maxPoolSize=5 2024-11-09T06:42:18,762 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e09398052c91:0, corePoolSize=10, maxPoolSize=10 2024-11-09T06:42:18,762 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-09T06:42:18,763 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e09398052c91:0, corePoolSize=2, maxPoolSize=2 2024-11-09T06:42:18,763 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-09T06:42:18,772 DEBUG [RS:2;e09398052c91:41261 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;e09398052c91:41261 2024-11-09T06:42:18,774 DEBUG [RS:0;e09398052c91:44301 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e09398052c91:44301 2024-11-09T06:42:18,776 INFO [RS:2;e09398052c91:41261 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T06:42:18,776 INFO [RS:0;e09398052c91:44301 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T06:42:18,776 INFO [RS:2;e09398052c91:41261 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T06:42:18,776 INFO [RS:0;e09398052c91:44301 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T06:42:18,776 DEBUG [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T06:42:18,776 DEBUG [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-09T06:42:18,778 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731134568777 2024-11-09T06:42:18,779 INFO [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(2659): reportForDuty to master=e09398052c91,33407,1731134536965 with port=41261, startcode=1731134537799 2024-11-09T06:42:18,780 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T06:42:18,780 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T06:42:18,780 INFO [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(2659): reportForDuty to master=e09398052c91,33407,1731134536965 with port=44301, startcode=1731134537644 2024-11-09T06:42:18,781 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T06:42:18,781 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T06:42:18,782 DEBUG [RS:1;e09398052c91:44035 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e09398052c91:44035 2024-11-09T06:42:18,783 INFO [RS:1;e09398052c91:44035 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T06:42:18,783 INFO [RS:1;e09398052c91:44035 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T06:42:18,783 DEBUG [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T06:42:18,784 INFO [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(2659): reportForDuty to master=e09398052c91,33407,1731134536965 with port=44035, startcode=1731134537752 2024-11-09T06:42:18,786 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T06:42:18,786 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T06:42:18,787 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T06:42:18,787 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T06:42:18,788 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:18,790 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:18,790 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T06:42:18,792 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T06:42:18,793 DEBUG [RS:1;e09398052c91:44035 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T06:42:18,793 DEBUG [RS:0;e09398052c91:44301 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T06:42:18,794 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T06:42:18,794 DEBUG [RS:2;e09398052c91:41261 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T06:42:18,794 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T06:42:18,800 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T06:42:18,801 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T06:42:18,810 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e09398052c91:0:becomeActiveMaster-HFileCleaner.large.0-1731134538802,5,FailOnTimeoutGroup] 2024-11-09T06:42:18,810 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/e09398052c91:0:becomeActiveMaster-HFileCleaner.small.0-1731134538810,5,FailOnTimeoutGroup] 2024-11-09T06:42:18,811 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,811 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T06:42:18,812 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,813 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,813 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,813 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:50014 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:34195:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50014 dst: /127.0.0.1:34195 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-09T06:42:18,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-09T06:42:18,839 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41655, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T06:42:18,839 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45483, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T06:42:18,840 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:18,841 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55909, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T06:42:18,841 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T06:42:18,842 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a 2024-11-09T06:42:18,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33407 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e09398052c91,44301,1731134537644 2024-11-09T06:42:18,850 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,850 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33407 {}] master.ServerManager(517): Registering regionserver=e09398052c91,44301,1731134537644 2024-11-09T06:42:18,850 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:18,862 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:56162 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56162 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-09T06:42:18,863 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33407 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e09398052c91,44035,1731134537752 2024-11-09T06:42:18,863 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33407 {}] master.ServerManager(517): Registering regionserver=e09398052c91,44035,1731134537752 2024-11-09T06:42:18,867 DEBUG [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a 2024-11-09T06:42:18,867 DEBUG [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34217 2024-11-09T06:42:18,867 DEBUG [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T06:42:18,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-09T06:42:18,869 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33407 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e09398052c91,41261,1731134537799 2024-11-09T06:42:18,869 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33407 {}] master.ServerManager(517): Registering regionserver=e09398052c91,41261,1731134537799 2024-11-09T06:42:18,869 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:18,869 DEBUG [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a 2024-11-09T06:42:18,870 DEBUG [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34217 2024-11-09T06:42:18,870 DEBUG [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T06:42:18,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:18,873 DEBUG [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a 2024-11-09T06:42:18,874 DEBUG [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34217 2024-11-09T06:42:18,874 DEBUG [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T06:42:18,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T06:42:18,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T06:42:18,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T06:42:18,878 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:18,879 DEBUG [RS:1;e09398052c91:44035 {}] zookeeper.ZKUtil(111): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e09398052c91,44035,1731134537752 2024-11-09T06:42:18,879 WARN [RS:1;e09398052c91:44035 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T06:42:18,879 INFO [RS:1;e09398052c91:44035 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T06:42:18,879 DEBUG [RS:2;e09398052c91:41261 {}] zookeeper.ZKUtil(111): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e09398052c91,41261,1731134537799 2024-11-09T06:42:18,879 WARN [RS:2;e09398052c91:41261 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T06:42:18,879 DEBUG [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44035,1731134537752 2024-11-09T06:42:18,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:18,879 INFO [RS:2;e09398052c91:41261 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T06:42:18,880 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T06:42:18,880 DEBUG [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,41261,1731134537799 2024-11-09T06:42:18,881 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e09398052c91,44301,1731134537644] 2024-11-09T06:42:18,881 DEBUG [RS:0;e09398052c91:44301 {}] zookeeper.ZKUtil(111): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e09398052c91,44301,1731134537644 2024-11-09T06:42:18,881 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e09398052c91,41261,1731134537799] 2024-11-09T06:42:18,881 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e09398052c91,44035,1731134537752] 2024-11-09T06:42:18,881 WARN [RS:0;e09398052c91:44301 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T06:42:18,881 INFO [RS:0;e09398052c91:44301 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T06:42:18,881 DEBUG [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44301,1731134537644 2024-11-09T06:42:18,884 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T06:42:18,884 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:18,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:18,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T06:42:18,889 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T06:42:18,889 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:18,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:18,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T06:42:18,893 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T06:42:18,893 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:18,894 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:18,894 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T06:42:18,895 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740 2024-11-09T06:42:18,896 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740 2024-11-09T06:42:18,900 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T06:42:18,900 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T06:42:18,901 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-09T06:42:18,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T06:42:18,910 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T06:42:18,911 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63675432, jitterRate=-0.051162123680114746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T06:42:18,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731134538872Initializing all the Stores at 1731134538874 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134538874Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134538874Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134538874Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134538874Cleaning up temporary data from old regions at 1731134538900 (+26 ms)Region opened successfully at 1731134538914 (+14 ms) 2024-11-09T06:42:18,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T06:42:18,914 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T06:42:18,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T06:42:18,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T06:42:18,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T06:42:18,916 INFO [RS:2;e09398052c91:41261 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T06:42:18,916 INFO [RS:0;e09398052c91:44301 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T06:42:18,916 INFO 
[RS:1;e09398052c91:44035 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T06:42:18,916 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T06:42:18,916 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731134538914Disabling compacts and flushes for region at 1731134538914Disabling writes for close at 1731134538914Writing region close event to WAL at 1731134538915 (+1 ms)Closed at 1731134538916 (+1 ms) 2024-11-09T06:42:18,919 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T06:42:18,919 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T06:42:18,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T06:42:18,932 INFO [RS:1;e09398052c91:44035 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T06:42:18,932 INFO [RS:2;e09398052c91:41261 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T06:42:18,933 INFO [RS:0;e09398052c91:44301 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T06:42:18,935 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T06:42:18,939 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T06:42:18,939 INFO [RS:0;e09398052c91:44301 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T06:42:18,939 INFO [RS:1;e09398052c91:44035 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T06:42:18,939 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,939 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:18,940 INFO [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T06:42:18,940 INFO [RS:2;e09398052c91:41261 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T06:42:18,940 INFO [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T06:42:18,941 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,942 INFO [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T06:42:18,948 INFO [RS:2;e09398052c91:41261 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T06:42:18,948 INFO [RS:0;e09398052c91:44301 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T06:42:18,948 INFO [RS:1;e09398052c91:44035 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T06:42:18,950 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,950 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,950 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:18,950 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,950 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,950 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,951 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e09398052c91:0, corePoolSize=2, maxPoolSize=2 2024-11-09T06:42:18,951 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e09398052c91:0, corePoolSize=2, maxPoolSize=2 2024-11-09T06:42:18,952 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e09398052c91:0, corePoolSize=2, maxPoolSize=2 2024-11-09T06:42:18,952 DEBUG 
[RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,952 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,953 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,953 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,953 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,953 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,953 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting 
executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:18,953 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:18,953 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:18,953 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:18,953 DEBUG [RS:0;e09398052c91:44301 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:18,953 DEBUG [RS:2;e09398052c91:41261 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:18,953 DEBUG [RS:1;e09398052c91:44035 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:18,955 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,955 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,955 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,955 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,955 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,955 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,44301,1731134537644-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T06:42:18,956 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,956 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,956 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,957 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,957 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,957 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,44035,1731134537752-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-09T06:42:18,960 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,960 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,961 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,961 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,961 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,961 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,41261,1731134537799-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T06:42:18,980 INFO [RS:1;e09398052c91:44035 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T06:42:18,980 INFO [RS:2;e09398052c91:41261 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T06:42:18,982 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,41261,1731134537799-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,982 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,44035,1731134537752-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,982 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,982 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,982 INFO [RS:2;e09398052c91:41261 {}] regionserver.Replication(171): e09398052c91,41261,1731134537799 started 2024-11-09T06:42:18,983 INFO [RS:1;e09398052c91:44035 {}] regionserver.Replication(171): e09398052c91,44035,1731134537752 started 2024-11-09T06:42:18,983 INFO [RS:0;e09398052c91:44301 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T06:42:18,983 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,44301,1731134537644-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,983 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:18,984 INFO [RS:0;e09398052c91:44301 {}] regionserver.Replication(171): e09398052c91,44301,1731134537644 started 2024-11-09T06:42:19,000 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:19,001 INFO [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(1482): Serving as e09398052c91,41261,1731134537799, RpcServer on e09398052c91/172.17.0.2:41261, sessionid=0x100fb76a49e0003 2024-11-09T06:42:19,001 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:19,001 INFO [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(1482): Serving as e09398052c91,44035,1731134537752, RpcServer on e09398052c91/172.17.0.2:44035, sessionid=0x100fb76a49e0002 2024-11-09T06:42:19,002 DEBUG [RS:2;e09398052c91:41261 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T06:42:19,002 DEBUG [RS:1;e09398052c91:44035 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T06:42:19,002 DEBUG [RS:2;e09398052c91:41261 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e09398052c91,41261,1731134537799 2024-11-09T06:42:19,002 DEBUG [RS:1;e09398052c91:44035 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e09398052c91,44035,1731134537752 2024-11-09T06:42:19,002 DEBUG [RS:2;e09398052c91:41261 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,41261,1731134537799' 2024-11-09T06:42:19,002 DEBUG [RS:1;e09398052c91:44035 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,44035,1731134537752' 2024-11-09T06:42:19,002 DEBUG [RS:2;e09398052c91:41261 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T06:42:19,002 DEBUG [RS:1;e09398052c91:44035 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T06:42:19,003 DEBUG [RS:2;e09398052c91:41261 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T06:42:19,004 DEBUG [RS:1;e09398052c91:44035 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T06:42:19,004 DEBUG [RS:2;e09398052c91:41261 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T06:42:19,004 DEBUG [RS:2;e09398052c91:41261 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T06:42:19,004 DEBUG [RS:2;e09398052c91:41261 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e09398052c91,41261,1731134537799 2024-11-09T06:42:19,004 DEBUG [RS:1;e09398052c91:44035 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T06:42:19,004 DEBUG [RS:2;e09398052c91:41261 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,41261,1731134537799' 2024-11-09T06:42:19,004 DEBUG [RS:1;e09398052c91:44035 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T06:42:19,004 DEBUG [RS:2;e09398052c91:41261 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T06:42:19,004 DEBUG [RS:1;e09398052c91:44035 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e09398052c91,44035,1731134537752 2024-11-09T06:42:19,004 DEBUG [RS:1;e09398052c91:44035 {}] 
procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,44035,1731134537752' 2024-11-09T06:42:19,004 DEBUG [RS:1;e09398052c91:44035 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T06:42:19,005 DEBUG [RS:2;e09398052c91:41261 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T06:42:19,005 DEBUG [RS:1;e09398052c91:44035 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T06:42:19,006 DEBUG [RS:2;e09398052c91:41261 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T06:42:19,006 INFO [RS:2;e09398052c91:41261 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T06:42:19,006 DEBUG [RS:1;e09398052c91:44035 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T06:42:19,006 INFO [RS:2;e09398052c91:41261 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T06:42:19,006 INFO [RS:1;e09398052c91:44035 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T06:42:19,006 INFO [RS:1;e09398052c91:44035 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T06:42:19,007 INFO [RS:0;e09398052c91:44301 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:19,007 INFO [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(1482): Serving as e09398052c91,44301,1731134537644, RpcServer on e09398052c91/172.17.0.2:44301, sessionid=0x100fb76a49e0001 2024-11-09T06:42:19,008 DEBUG [RS:0;e09398052c91:44301 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T06:42:19,008 DEBUG [RS:0;e09398052c91:44301 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e09398052c91,44301,1731134537644 2024-11-09T06:42:19,008 DEBUG [RS:0;e09398052c91:44301 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,44301,1731134537644' 2024-11-09T06:42:19,008 DEBUG [RS:0;e09398052c91:44301 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T06:42:19,009 DEBUG [RS:0;e09398052c91:44301 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T06:42:19,009 DEBUG [RS:0;e09398052c91:44301 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T06:42:19,009 DEBUG [RS:0;e09398052c91:44301 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T06:42:19,009 DEBUG [RS:0;e09398052c91:44301 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e09398052c91,44301,1731134537644 2024-11-09T06:42:19,010 DEBUG [RS:0;e09398052c91:44301 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,44301,1731134537644' 2024-11-09T06:42:19,010 DEBUG [RS:0;e09398052c91:44301 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T06:42:19,010 DEBUG [RS:0;e09398052c91:44301 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new 
procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T06:42:19,011 DEBUG [RS:0;e09398052c91:44301 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T06:42:19,011 INFO [RS:0;e09398052c91:44301 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T06:42:19,011 INFO [RS:0;e09398052c91:44301 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T06:42:19,090 WARN [e09398052c91:33407 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-09T06:42:19,112 INFO [RS:1;e09398052c91:44035 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T06:42:19,112 INFO [RS:0;e09398052c91:44301 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T06:42:19,112 INFO [RS:2;e09398052c91:41261 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T06:42:19,115 INFO [RS:0;e09398052c91:44301 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C44301%2C1731134537644, suffix=, logDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44301,1731134537644, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/oldWALs, maxLogs=32 2024-11-09T06:42:19,115 INFO [RS:2;e09398052c91:41261 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C41261%2C1731134537799, suffix=, logDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,41261,1731134537799, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/oldWALs, maxLogs=32 2024-11-09T06:42:19,115 INFO [RS:1;e09398052c91:44035 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C44035%2C1731134537752, suffix=, logDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44035,1731134537752, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/oldWALs, maxLogs=32 2024-11-09T06:42:19,133 DEBUG [RS:0;e09398052c91:44301 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44301,1731134537644/e09398052c91%2C44301%2C1731134537644.1731134539119, exclude list is [], retry=0 2024-11-09T06:42:19,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34195,DS-1a6d5c85-8e6c-45bd-ad2f-22e086241cbc,DISK] 2024-11-09T06:42:19,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42651,DS-5540a4b8-7160-41f6-ab1e-3dfa431f36fb,DISK] 2024-11-09T06:42:19,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:42479,DS-0c8ba812-e569-4051-b4dc-e1c8681bf36e,DISK] 2024-11-09T06:42:19,157 DEBUG [RS:1;e09398052c91:44035 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44035,1731134537752/e09398052c91%2C44035%2C1731134537752.1731134539119, exclude list is [], retry=0 2024-11-09T06:42:19,157 DEBUG [RS:2;e09398052c91:41261 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,41261,1731134537799/e09398052c91%2C41261%2C1731134537799.1731134539119, exclude list is [], retry=0 2024-11-09T06:42:19,160 INFO [RS:0;e09398052c91:44301 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44301,1731134537644/e09398052c91%2C44301%2C1731134537644.1731134539119 2024-11-09T06:42:19,160 DEBUG [RS:0;e09398052c91:44301 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39269:39269),(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:42641:42641)] 2024-11-09T06:42:19,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42651,DS-5540a4b8-7160-41f6-ab1e-3dfa431f36fb,DISK] 2024-11-09T06:42:19,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34195,DS-1a6d5c85-8e6c-45bd-ad2f-22e086241cbc,DISK] 2024-11-09T06:42:19,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34195,DS-1a6d5c85-8e6c-45bd-ad2f-22e086241cbc,DISK] 2024-11-09T06:42:19,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42479,DS-0c8ba812-e569-4051-b4dc-e1c8681bf36e,DISK] 2024-11-09T06:42:19,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42651,DS-5540a4b8-7160-41f6-ab1e-3dfa431f36fb,DISK] 2024-11-09T06:42:19,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42479,DS-0c8ba812-e569-4051-b4dc-e1c8681bf36e,DISK] 2024-11-09T06:42:19,172 INFO [RS:1;e09398052c91:44035 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44035,1731134537752/e09398052c91%2C44035%2C1731134537752.1731134539119 2024-11-09T06:42:19,172 INFO [RS:2;e09398052c91:41261 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,41261,1731134537799/e09398052c91%2C41261%2C1731134537799.1731134539119 2024-11-09T06:42:19,172 DEBUG [RS:1;e09398052c91:44035 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42641:42641),(127.0.0.1/127.0.0.1:39269:39269),(127.0.0.1/127.0.0.1:37551:37551)] 2024-11-09T06:42:19,173 DEBUG [RS:2;e09398052c91:41261 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39269:39269),(127.0.0.1/127.0.0.1:42641:42641),(127.0.0.1/127.0.0.1:37551:37551)] 2024-11-09T06:42:19,343 DEBUG [e09398052c91:33407 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T06:42:19,351 DEBUG [e09398052c91:33407 {}] balancer.BalancerClusterState(204): Hosts are {e09398052c91=0} racks are {/default-rack=0} 2024-11-09T06:42:19,358 DEBUG [e09398052c91:33407 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T06:42:19,358 DEBUG [e09398052c91:33407 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T06:42:19,358 DEBUG [e09398052c91:33407 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T06:42:19,358 DEBUG [e09398052c91:33407 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T06:42:19,358 DEBUG [e09398052c91:33407 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T06:42:19,358 DEBUG [e09398052c91:33407 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T06:42:19,358 INFO [e09398052c91:33407 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T06:42:19,358 INFO [e09398052c91:33407 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T06:42:19,358 INFO [e09398052c91:33407 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T06:42:19,358 DEBUG [e09398052c91:33407 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T06:42:19,365 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e09398052c91,44035,1731134537752 2024-11-09T06:42:19,372 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e09398052c91,44035,1731134537752, state=OPENING 2024-11-09T06:42:19,377 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T06:42:19,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:19,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:19,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:19,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-09T06:42:19,381 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:19,381 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:19,381 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:19,381 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:19,383 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T06:42:19,385 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e09398052c91,44035,1731134537752}] 2024-11-09T06:42:19,562 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T06:42:19,564 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46533, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T06:42:19,577 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T06:42:19,578 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T06:42:19,578 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-09T06:42:19,581 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C44035%2C1731134537752.meta, suffix=.meta, logDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44035,1731134537752, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/oldWALs, maxLogs=32 2024-11-09T06:42:19,598 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44035,1731134537752/e09398052c91%2C44035%2C1731134537752.meta.1731134539583.meta, exclude list is [], retry=0 2024-11-09T06:42:19,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42651,DS-5540a4b8-7160-41f6-ab1e-3dfa431f36fb,DISK] 2024-11-09T06:42:19,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration 
for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34195,DS-1a6d5c85-8e6c-45bd-ad2f-22e086241cbc,DISK] 2024-11-09T06:42:19,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42479,DS-0c8ba812-e569-4051-b4dc-e1c8681bf36e,DISK] 2024-11-09T06:42:19,605 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/WALs/e09398052c91,44035,1731134537752/e09398052c91%2C44035%2C1731134537752.meta.1731134539583.meta 2024-11-09T06:42:19,605 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39269:39269),(127.0.0.1/127.0.0.1:42641:42641),(127.0.0.1/127.0.0.1:37551:37551)] 2024-11-09T06:42:19,605 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T06:42:19,607 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T06:42:19,610 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T06:42:19,614 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
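The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries earlier in this stretch are read from the cluster Configuration when each AbstractFSWAL is built. A hedged sketch of the knobs usually involved; the property names are standard HBase keys, but the values simply mirror what this run logged and are not recommendations:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Async fan-out WAL provider, matching the AsyncFSWALProvider/AsyncFSWAL lines above.
        conf.set("hbase.wal.provider", "asyncfs");
        // WAL block size (256 MB in the log) and the roll threshold as a fraction of it
        // (0.5 * 256 MB gives the 128 MB rollsize reported above).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Cap on un-archived WAL files per server (maxLogs=32 above).
        conf.setInt("hbase.regionserver.maxlogs", 32);
      }
    }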
2024-11-09T06:42:19,619 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T06:42:19,619 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:19,619 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T06:42:19,619 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T06:42:19,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T06:42:19,624 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T06:42:19,624 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:19,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:19,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T06:42:19,627 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T06:42:19,627 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:19,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:19,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T06:42:19,630 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T06:42:19,630 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:19,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:19,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T06:42:19,633 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T06:42:19,633 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:19,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
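The CompactionConfiguration lines above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0) are the per-store view of the usual compaction settings. A sketch of the corresponding keys; the names are standard HBase properties and the values simply echo what this run reports:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
      }
    }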
2024-11-09T06:42:19,634 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T06:42:19,635 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740 2024-11-09T06:42:19,638 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740 2024-11-09T06:42:19,641 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T06:42:19,641 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T06:42:19,642 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T06:42:19,645 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T06:42:19,647 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60049503, jitterRate=-0.10519267618656158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T06:42:19,647 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T06:42:19,649 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731134539620Writing region info on filesystem at 1731134539620Initializing all the Stores at 1731134539622 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134539622Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134539623 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134539623Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134539623Cleaning up temporary data from old regions at 1731134539641 (+18 ms)Running coprocessor post-open hooks at 1731134539647 (+6 ms)Region opened successfully at 1731134539649 (+2 ms) 2024-11-09T06:42:19,656 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731134539553 2024-11-09T06:42:19,667 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T06:42:19,668 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T06:42:19,669 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e09398052c91,44035,1731134537752 2024-11-09T06:42:19,671 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e09398052c91,44035,1731134537752, state=OPEN 2024-11-09T06:42:19,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T06:42:19,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T06:42:19,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T06:42:19,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T06:42:19,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:19,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:19,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:19,674 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:19,674 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=e09398052c91,44035,1731134537752 2024-11-09T06:42:19,678 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T06:42:19,679 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e09398052c91,44035,1731134537752 in 289 msec 2024-11-09T06:42:19,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T06:42:19,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 755 msec 2024-11-09T06:42:19,686 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T06:42:19,686 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T06:42:19,708 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T06:42:19,710 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e09398052c91,44035,1731134537752, seqNum=-1] 2024-11-09T06:42:19,733 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T06:42:19,735 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52703, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T06:42:19,754 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0730 sec 2024-11-09T06:42:19,754 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731134539754, completionTime=-1 2024-11-09T06:42:19,757 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T06:42:19,757 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-09T06:42:19,813 INFO [master/e09398052c91:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-09T06:42:19,814 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731134599814 2024-11-09T06:42:19,814 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731134659814 2024-11-09T06:42:19,814 INFO [master/e09398052c91:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 56 msec 2024-11-09T06:42:19,815 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T06:42:19,826 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33407,1731134536965-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:19,826 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33407,1731134536965-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:19,826 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33407,1731134536965-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:19,828 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e09398052c91:33407, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:19,828 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:19,828 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:19,835 DEBUG [master/e09398052c91:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T06:42:19,855 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.963sec 2024-11-09T06:42:19,857 INFO [master/e09398052c91:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T06:42:19,858 INFO [master/e09398052c91:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T06:42:19,859 INFO [master/e09398052c91:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T06:42:19,859 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-09T06:42:19,859 INFO [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T06:42:19,860 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33407,1731134536965-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T06:42:19,861 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33407,1731134536965-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T06:42:19,865 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T06:42:19,865 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T06:42:19,866 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33407,1731134536965-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:19,942 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c981dce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T06:42:19,946 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-09T06:42:19,946 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-09T06:42:19,950 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e09398052c91,33407,-1 for getting cluster id 2024-11-09T06:42:19,952 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T06:42:19,960 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '29bb78d3-fda1-488f-bf1a-bbf5a84a5c0c' 2024-11-09T06:42:19,963 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T06:42:19,963 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "29bb78d3-fda1-488f-bf1a-bbf5a84a5c0c" 2024-11-09T06:42:19,963 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341dc378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T06:42:19,964 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e09398052c91,33407,-1] 2024-11-09T06:42:19,967 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T06:42:19,969 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:19,970 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54626, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
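The client-side trace above (ClusterIdFetcher, connection-registry stubs, then the meta region location fetch) is what ConnectionFactory performs when a connection is opened. A minimal sketch using only the public client API; the quorum value is copied from this run's test ZooKeeper and would ordinarily come from hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ConnectSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1:60610"); // quorum seen in this run
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Triggers the ConnectionRegistryService/ClientMetaService calls logged above.
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }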
2024-11-09T06:42:19,973 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f42820e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T06:42:19,974 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T06:42:19,980 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e09398052c91,44035,1731134537752, seqNum=-1] 2024-11-09T06:42:19,981 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T06:42:19,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43082, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T06:42:20,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e09398052c91,33407,1731134536965 2024-11-09T06:42:20,006 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T06:42:20,011 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is e09398052c91,33407,1731134536965 2024-11-09T06:42:20,013 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@23668e74 2024-11-09T06:42:20,014 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T06:42:20,016 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54630, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T06:42:20,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T06:42:20,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T06:42:20,031 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T06:42:20,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-09T06:42:20,033 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:20,035 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T06:42:20,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T06:42:20,044 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:20,044 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:20,047 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:56214 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56214 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:20,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-09T06:42:20,056 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T06:42:20,059 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 538a58f29c662d53bae321a83740b000, NAME => 'TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a 2024-11-09T06:42:20,064 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:20,064 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:20,067 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:56228 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56228 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:20,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-09T06:42:20,074 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
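The "Cannot allocate parity block ... policy=RS-3-2-1024k" warnings above are expected in this setup: RS-3-2 wants three data and two parity blocks on distinct datanodes, and this mini-cluster has only three, so the two parity blocks of each block group cannot be placed. On a real cluster the command the warning cites can confirm whether the topology supports the enabled policies; typical invocations (the /hbase path and the policy name here are only examples):

    hdfs ec -verifyClusterSetup                     # check topology against enabled EC policies
    hdfs ec -listPolicies                           # list policies and whether they are enabled
    hdfs ec -getPolicy -path /hbase                 # policy effective on a directory
    hdfs ec -setPolicy -path /hbase -policy RS-3-2-1024k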
2024-11-09T06:42:20,075 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:20,075 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 538a58f29c662d53bae321a83740b000, disabling compactions & flushes 2024-11-09T06:42:20,075 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:20,075 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:20,076 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. after waiting 0 ms 2024-11-09T06:42:20,076 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:20,076 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:20,076 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 538a58f29c662d53bae321a83740b000: Waiting for close lock at 1731134540075Disabling compacts and flushes for region at 1731134540075Disabling writes for close at 1731134540076 (+1 ms)Writing region close event to WAL at 1731134540076Closed at 1731134540076 2024-11-09T06:42:20,078 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T06:42:20,083 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731134540078"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731134540078"}]},"ts":"1731134540078"} 2024-11-09T06:42:20,089 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-09T06:42:20,091 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T06:42:20,093 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731134540091"}]},"ts":"1731134540091"} 2024-11-09T06:42:20,098 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T06:42:20,098 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {e09398052c91=0} racks are {/default-rack=0} 2024-11-09T06:42:20,099 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T06:42:20,099 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T06:42:20,099 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T06:42:20,099 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T06:42:20,099 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T06:42:20,099 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T06:42:20,099 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T06:42:20,099 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T06:42:20,099 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T06:42:20,100 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T06:42:20,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=538a58f29c662d53bae321a83740b000, ASSIGN}] 2024-11-09T06:42:20,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=538a58f29c662d53bae321a83740b000, ASSIGN 2024-11-09T06:42:20,105 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=538a58f29c662d53bae321a83740b000, ASSIGN; state=OFFLINE, location=e09398052c91,41261,1731134537799; forceNewPlan=false, retain=false 2024-11-09T06:42:20,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T06:42:20,257 INFO [e09398052c91:33407 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-09T06:42:20,258 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=538a58f29c662d53bae321a83740b000, regionState=OPENING, regionLocation=e09398052c91,41261,1731134537799 2024-11-09T06:42:20,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=538a58f29c662d53bae321a83740b000, ASSIGN because future has completed 2024-11-09T06:42:20,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 538a58f29c662d53bae321a83740b000, server=e09398052c91,41261,1731134537799}] 2024-11-09T06:42:20,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T06:42:20,418 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T06:42:20,420 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57777, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T06:42:20,425 INFO [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:20,426 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 538a58f29c662d53bae321a83740b000, NAME => 'TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000.', STARTKEY => '', ENDKEY => ''} 2024-11-09T06:42:20,426 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,427 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:20,427 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,427 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,429 INFO [StoreOpener-538a58f29c662d53bae321a83740b000-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,432 INFO [StoreOpener-538a58f29c662d53bae321a83740b000-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 538a58f29c662d53bae321a83740b000 columnFamilyName cf 2024-11-09T06:42:20,432 DEBUG [StoreOpener-538a58f29c662d53bae321a83740b000-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:20,433 INFO [StoreOpener-538a58f29c662d53bae321a83740b000-1 {}] regionserver.HStore(327): Store=538a58f29c662d53bae321a83740b000/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T06:42:20,433 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,434 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/default/TestHBaseWalOnEC/538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,434 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/default/TestHBaseWalOnEC/538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,435 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,435 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,438 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,442 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/default/TestHBaseWalOnEC/538a58f29c662d53bae321a83740b000/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T06:42:20,443 INFO [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 538a58f29c662d53bae321a83740b000; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64174979, jitterRate=-0.04371829330921173}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T06:42:20,443 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:20,444 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 538a58f29c662d53bae321a83740b000: Running coprocessor pre-open hook at 1731134540427Writing region info on filesystem at 1731134540427Initializing all the Stores at 1731134540429 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134540429Cleaning up temporary data from old regions at 1731134540435 (+6 ms)Running coprocessor post-open hooks at 1731134540443 (+8 ms)Region opened successfully at 1731134540444 (+1 ms) 2024-11-09T06:42:20,446 INFO [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000., pid=6, masterSystemTime=1731134540417 2024-11-09T06:42:20,449 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:20,449 INFO [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:20,450 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=538a58f29c662d53bae321a83740b000, regionState=OPEN, openSeqNum=2, regionLocation=e09398052c91,41261,1731134537799 2024-11-09T06:42:20,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 538a58f29c662d53bae321a83740b000, server=e09398052c91,41261,1731134537799 because future has completed 2024-11-09T06:42:20,460 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T06:42:20,460 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 538a58f29c662d53bae321a83740b000, server=e09398052c91,41261,1731134537799 in 194 msec 2024-11-09T06:42:20,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T06:42:20,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=538a58f29c662d53bae321a83740b000, ASSIGN in 359 msec 2024-11-09T06:42:20,465 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T06:42:20,466 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731134540465"}]},"ts":"1731134540465"} 2024-11-09T06:42:20,468 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T06:42:20,470 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T06:42:20,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 446 msec 2024-11-09T06:42:20,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T06:42:20,668 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T06:42:20,668 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T06:42:20,670 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T06:42:20,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T06:42:20,675 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T06:42:20,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-09T06:42:20,683 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000., hostname=e09398052c91,41261,1731134537799, seqNum=2] 2024-11-09T06:42:20,684 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T06:42:20,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34230, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T06:42:20,695 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-09T06:42:20,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T06:42:20,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T06:42:20,702 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T06:42:20,704 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T06:42:20,705 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T06:42:20,807 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T06:42:20,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41261 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T06:42:20,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:20,872 INFO [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 538a58f29c662d53bae321a83740b000 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T06:42:20,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/default/TestHBaseWalOnEC/538a58f29c662d53bae321a83740b000/.tmp/cf/bd70769fd9284037ba4642353342c6c9 is 36, key is row/cf:cq/1731134540687/Put/seqid=0 2024-11-09T06:42:20,945 WARN [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:20,946 WARN [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:20,950 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-363800893_22 at /127.0.0.1:46932 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46932 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:20,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-09T06:42:20,955 WARN [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:20,955 INFO [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/default/TestHBaseWalOnEC/538a58f29c662d53bae321a83740b000/.tmp/cf/bd70769fd9284037ba4642353342c6c9 2024-11-09T06:42:21,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/default/TestHBaseWalOnEC/538a58f29c662d53bae321a83740b000/.tmp/cf/bd70769fd9284037ba4642353342c6c9 as hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/default/TestHBaseWalOnEC/538a58f29c662d53bae321a83740b000/cf/bd70769fd9284037ba4642353342c6c9 2024-11-09T06:42:21,010 INFO [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/default/TestHBaseWalOnEC/538a58f29c662d53bae321a83740b000/cf/bd70769fd9284037ba4642353342c6c9, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T06:42:21,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T06:42:21,019 INFO [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 538a58f29c662d53bae321a83740b000 in 146ms, sequenceid=5, compaction requested=false 2024-11-09T06:42:21,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-09T06:42:21,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 538a58f29c662d53bae321a83740b000: 2024-11-09T06:42:21,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 
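Editor's note: the flush above persists a single 32 B cell (key row/cf:cq, timestamp 1731134540687) into the ~4.7 K HFile bd70769fd9284037ba4642353342c6c9 under the region's cf directory. A hedged sketch of the client-side calls such a sequence corresponds to (one Put followed by an admin-triggered flush) is shown below using the standard HBase client API; it again assumes an open Connection, and the cell value is arbitrary since the log does not show it.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutAndFlushSketch {
    // Write one cell (row/cf:cq) and force a memstore flush, mirroring the
    // FlushTableProcedure (pid=7) and FlushRegionProcedure (pid=8) recorded above.
    static void putAndFlush(Connection conn) throws IOException {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
            table.put(new Put(Bytes.toBytes("row"))
                .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"),
                    Bytes.toBytes("value")));   // value is illustrative
            admin.flush(name);   // produces an HFile under .../cf/ like the one logged above
        }
    }
}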
2024-11-09T06:42:21,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T06:42:21,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T06:42:21,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T06:42:21,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 324 msec 2024-11-09T06:42:21,036 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 338 msec 2024-11-09T06:42:21,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T06:42:21,328 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T06:42:21,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T06:42:21,342 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T06:42:21,342 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:21,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:21,347 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:21,347 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-09T06:42:21,347 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T06:42:21,347 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1434548836, stopped=false 2024-11-09T06:42:21,348 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e09398052c91,33407,1731134536965 2024-11-09T06:42:21,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:21,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:21,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:21,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:21,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:21,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, 
quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:21,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:21,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:21,350 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T06:42:21,350 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T06:42:21,351 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:21,351 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:21,351 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:21,351 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:21,351 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:21,351 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:21,351 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e09398052c91,44301,1731134537644' ***** 2024-11-09T06:42:21,351 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T06:42:21,351 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e09398052c91,44035,1731134537752' ***** 2024-11-09T06:42:21,352 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T06:42:21,352 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e09398052c91,41261,1731134537799' ***** 2024-11-09T06:42:21,352 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T06:42:21,352 INFO [RS:0;e09398052c91:44301 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T06:42:21,352 INFO [RS:1;e09398052c91:44035 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T06:42:21,353 INFO [RS:0;e09398052c91:44301 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-09T06:42:21,353 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T06:42:21,353 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T06:42:21,353 INFO [RS:2;e09398052c91:41261 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T06:42:21,353 INFO [RS:1;e09398052c91:44035 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T06:42:21,353 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T06:42:21,353 INFO [RS:0;e09398052c91:44301 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T06:42:21,353 INFO [RS:2;e09398052c91:41261 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T06:42:21,353 INFO [RS:2;e09398052c91:41261 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T06:42:21,353 INFO [RS:1;e09398052c91:44035 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T06:42:21,353 INFO [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(959): stopping server e09398052c91,44301,1731134537644 2024-11-09T06:42:21,353 INFO [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(959): stopping server e09398052c91,44035,1731134537752 2024-11-09T06:42:21,353 INFO [RS:0;e09398052c91:44301 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T06:42:21,353 INFO [RS:1;e09398052c91:44035 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T06:42:21,353 INFO [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(3091): Received CLOSE for 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:21,353 INFO [RS:0;e09398052c91:44301 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e09398052c91:44301. 2024-11-09T06:42:21,353 INFO [RS:1;e09398052c91:44035 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e09398052c91:44035. 
2024-11-09T06:42:21,353 DEBUG [RS:0;e09398052c91:44301 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:21,353 DEBUG [RS:1;e09398052c91:44035 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:21,353 DEBUG [RS:0;e09398052c91:44301 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:21,353 DEBUG [RS:1;e09398052c91:44035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:21,354 INFO [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(976): stopping server e09398052c91,44301,1731134537644; all regions closed. 2024-11-09T06:42:21,354 INFO [RS:1;e09398052c91:44035 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T06:42:21,354 INFO [RS:1;e09398052c91:44035 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-09T06:42:21,354 INFO [RS:1;e09398052c91:44035 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T06:42:21,354 INFO [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T06:42:21,354 INFO [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(959): stopping server e09398052c91,41261,1731134537799 2024-11-09T06:42:21,354 INFO [RS:2;e09398052c91:41261 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T06:42:21,354 INFO [RS:2;e09398052c91:41261 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;e09398052c91:41261. 2024-11-09T06:42:21,354 DEBUG [RS:2;e09398052c91:41261 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:21,354 DEBUG [RS:2;e09398052c91:41261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:21,354 INFO [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T06:42:21,354 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 538a58f29c662d53bae321a83740b000, disabling compactions & flushes 2024-11-09T06:42:21,354 INFO [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:21,355 INFO [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T06:42:21,355 DEBUG [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-09T06:42:21,355 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 
2024-11-09T06:42:21,355 DEBUG [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(1325): Online Regions={538a58f29c662d53bae321a83740b000=TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000.} 2024-11-09T06:42:21,355 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. after waiting 0 ms 2024-11-09T06:42:21,355 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:21,355 DEBUG [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-09T06:42:21,355 DEBUG [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(1351): Waiting on 538a58f29c662d53bae321a83740b000 2024-11-09T06:42:21,355 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T06:42:21,355 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T06:42:21,355 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T06:42:21,355 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T06:42:21,355 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T06:42:21,356 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T06:42:21,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741826_1016 (size=93) 2024-11-09T06:42:21,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_1073741826_1016 (size=93) 2024-11-09T06:42:21,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_1073741826_1016 (size=93) 2024-11-09T06:42:21,362 INFO [regionserver/e09398052c91:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:21,363 INFO [regionserver/e09398052c91:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:21,368 DEBUG [RS:0;e09398052c91:44301 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/oldWALs 2024-11-09T06:42:21,368 INFO [RS:0;e09398052c91:44301 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e09398052c91%2C44301%2C1731134537644:(num 1731134539119) 2024-11-09T06:42:21,368 DEBUG [RS:0;e09398052c91:44301 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:21,368 INFO [RS:0;e09398052c91:44301 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:21,368 INFO [RS:0;e09398052c91:44301 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T06:42:21,368 INFO 
[RS:0;e09398052c91:44301 {}] hbase.ChoreService(370): Chore service for: regionserver/e09398052c91:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T06:42:21,369 INFO [RS:0;e09398052c91:44301 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T06:42:21,369 INFO [RS:0;e09398052c91:44301 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T06:42:21,369 INFO [RS:0;e09398052c91:44301 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T06:42:21,369 INFO [RS:0;e09398052c91:44301 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T06:42:21,369 INFO [regionserver/e09398052c91:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T06:42:21,369 INFO [RS:0;e09398052c91:44301 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44301 2024-11-09T06:42:21,372 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/default/TestHBaseWalOnEC/538a58f29c662d53bae321a83740b000/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T06:42:21,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e09398052c91,44301,1731134537644 2024-11-09T06:42:21,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T06:42:21,373 INFO [RS:0;e09398052c91:44301 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T06:42:21,375 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e09398052c91,44301,1731134537644] 2024-11-09T06:42:21,375 INFO [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 2024-11-09T06:42:21,375 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 538a58f29c662d53bae321a83740b000: Waiting for close lock at 1731134541354Running coprocessor pre-close hooks at 1731134541354Disabling compacts and flushes for region at 1731134541354Disabling writes for close at 1731134541355 (+1 ms)Writing region close event to WAL at 1731134541356 (+1 ms)Running coprocessor post-close hooks at 1731134541374 (+18 ms)Closed at 1731134541375 (+1 ms) 2024-11-09T06:42:21,376 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000. 
2024-11-09T06:42:21,378 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e09398052c91,44301,1731134537644 already deleted, retry=false 2024-11-09T06:42:21,378 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e09398052c91,44301,1731134537644 expired; onlineServers=2 2024-11-09T06:42:21,386 INFO [regionserver/e09398052c91:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:21,392 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/.tmp/info/fc1dbb4e5d364b7cb6017fd1bf55ce74 is 153, key is TestHBaseWalOnEC,,1731134540018.538a58f29c662d53bae321a83740b000./info:regioninfo/1731134540450/Put/seqid=0 2024-11-09T06:42:21,395 WARN [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,395 WARN [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,399 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1671699073_22 at /127.0.0.1:46946 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46946 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-09T06:42:21,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-09T06:42:21,403 WARN [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:21,404 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/.tmp/info/fc1dbb4e5d364b7cb6017fd1bf55ce74 2024-11-09T06:42:21,431 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/.tmp/ns/6ef9c493800f4253949b4bc7d52188ed is 43, key is default/ns:d/1731134539739/Put/seqid=0 2024-11-09T06:42:21,433 WARN [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,434 WARN [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1671699073_22 at /127.0.0.1:54324 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:42651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54324 dst: /127.0.0.1:42651 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-09T06:42:21,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-09T06:42:21,442 WARN [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:21,442 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/.tmp/ns/6ef9c493800f4253949b4bc7d52188ed 2024-11-09T06:42:21,468 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/.tmp/table/7160a9d785224f6484b6fb0d2a12edeb is 52, key is TestHBaseWalOnEC/table:state/1731134540465/Put/seqid=0 2024-11-09T06:42:21,470 WARN [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,470 WARN [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,473 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1671699073_22 at /127.0.0.1:54358 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:42651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54358 dst: /127.0.0.1:42651 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:21,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:21,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44301-0x100fb76a49e0001, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:21,478 INFO [RS:0;e09398052c91:44301 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T06:42:21,478 INFO [RS:0;e09398052c91:44301 {}] regionserver.HRegionServer(1031): Exiting; stopping=e09398052c91,44301,1731134537644; zookeeper connection closed. 2024-11-09T06:42:21,479 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f9f6460 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f9f6460 2024-11-09T06:42:21,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-09T06:42:21,480 WARN [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:21,480 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/.tmp/table/7160a9d785224f6484b6fb0d2a12edeb 2024-11-09T06:42:21,490 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/.tmp/info/fc1dbb4e5d364b7cb6017fd1bf55ce74 as hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/info/fc1dbb4e5d364b7cb6017fd1bf55ce74 2024-11-09T06:42:21,498 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/info/fc1dbb4e5d364b7cb6017fd1bf55ce74, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T06:42:21,499 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/.tmp/ns/6ef9c493800f4253949b4bc7d52188ed as hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/ns/6ef9c493800f4253949b4bc7d52188ed 2024-11-09T06:42:21,507 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/ns/6ef9c493800f4253949b4bc7d52188ed, entries=2, sequenceid=11, filesize=5.0 K 2024-11-09T06:42:21,508 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/.tmp/table/7160a9d785224f6484b6fb0d2a12edeb as hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/table/7160a9d785224f6484b6fb0d2a12edeb 2024-11-09T06:42:21,517 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/table/7160a9d785224f6484b6fb0d2a12edeb, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T06:42:21,518 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 163ms, sequenceid=11, compaction requested=false 2024-11-09T06:42:21,519 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-09T06:42:21,527 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T06:42:21,528 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T06:42:21,528 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T06:42:21,528 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731134541355Running coprocessor pre-close hooks at 1731134541355Disabling compacts and flushes for region at 1731134541355Disabling writes for close at 1731134541355Obtaining lock to block concurrent updates at 1731134541356 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731134541356Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731134541356Flushing stores of hbase:meta,,1.1588230740 at 1731134541358 (+2 ms)Flushing 1588230740/info: creating writer at 1731134541358Flushing 1588230740/info: appending metadata at 1731134541388 (+30 ms)Flushing 1588230740/info: closing flushed file at 1731134541389 (+1 ms)Flushing 1588230740/ns: creating writer at 1731134541413 (+24 ms)Flushing 1588230740/ns: appending metadata at 1731134541430 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731134541430Flushing 1588230740/table: creating writer at 1731134541451 (+21 ms)Flushing 1588230740/table: appending metadata at 1731134541467 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731134541467Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ab1156e: reopening flushed file at 1731134541489 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6db65d6: reopening flushed file at 1731134541498 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45ab3ada: reopening flushed file at 1731134541507 (+9 
ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 163ms, sequenceid=11, compaction requested=false at 1731134541519 (+12 ms)Writing region close event to WAL at 1731134541521 (+2 ms)Running coprocessor post-close hooks at 1731134541528 (+7 ms)Closed at 1731134541528 2024-11-09T06:42:21,529 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T06:42:21,555 INFO [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(976): stopping server e09398052c91,41261,1731134537799; all regions closed. 2024-11-09T06:42:21,555 INFO [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(976): stopping server e09398052c91,44035,1731134537752; all regions closed. 2024-11-09T06:42:21,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_1073741829_1019 (size=2751) 2024-11-09T06:42:21,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741829_1019 (size=2751) 2024-11-09T06:42:21,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_1073741827_1017 (size=1298) 2024-11-09T06:42:21,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741827_1017 (size=1298) 2024-11-09T06:42:21,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_1073741829_1019 (size=2751) 2024-11-09T06:42:21,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_1073741827_1017 (size=1298) 2024-11-09T06:42:21,564 DEBUG [RS:1;e09398052c91:44035 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/oldWALs 2024-11-09T06:42:21,564 INFO [RS:1;e09398052c91:44035 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e09398052c91%2C44035%2C1731134537752.meta:.meta(num 1731134539583) 2024-11-09T06:42:21,564 DEBUG [RS:2;e09398052c91:41261 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/oldWALs 2024-11-09T06:42:21,564 INFO [RS:2;e09398052c91:41261 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e09398052c91%2C41261%2C1731134537799:(num 1731134539119) 2024-11-09T06:42:21,564 DEBUG [RS:2;e09398052c91:41261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:21,564 INFO [RS:2;e09398052c91:41261 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:21,564 INFO [RS:2;e09398052c91:41261 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T06:42:21,564 INFO [RS:2;e09398052c91:41261 {}] hbase.ChoreService(370): Chore service for: regionserver/e09398052c91:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T06:42:21,564 INFO [RS:2;e09398052c91:41261 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T06:42:21,565 INFO [RS:2;e09398052c91:41261 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-09T06:42:21,565 INFO [RS:2;e09398052c91:41261 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T06:42:21,565 INFO [regionserver/e09398052c91:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T06:42:21,565 INFO [RS:2;e09398052c91:41261 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T06:42:21,565 INFO [RS:2;e09398052c91:41261 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41261 2024-11-09T06:42:21,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_1073741828_1018 (size=93) 2024-11-09T06:42:21,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e09398052c91,41261,1731134537799 2024-11-09T06:42:21,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T06:42:21,567 INFO [RS:2;e09398052c91:41261 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T06:42:21,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_1073741828_1018 (size=93) 2024-11-09T06:42:21,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741828_1018 (size=93) 2024-11-09T06:42:21,569 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e09398052c91,41261,1731134537799] 2024-11-09T06:42:21,571 DEBUG [RS:1;e09398052c91:44035 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/oldWALs 2024-11-09T06:42:21,571 INFO [RS:1;e09398052c91:44035 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e09398052c91%2C44035%2C1731134537752:(num 1731134539119) 2024-11-09T06:42:21,571 DEBUG [RS:1;e09398052c91:44035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:21,571 INFO [RS:1;e09398052c91:44035 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:21,571 INFO [RS:1;e09398052c91:44035 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T06:42:21,571 INFO [RS:1;e09398052c91:44035 {}] hbase.ChoreService(370): Chore service for: regionserver/e09398052c91:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T06:42:21,571 INFO [RS:1;e09398052c91:44035 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T06:42:21,571 INFO [regionserver/e09398052c91:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T06:42:21,571 INFO [RS:1;e09398052c91:44035 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44035 2024-11-09T06:42:21,572 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e09398052c91,41261,1731134537799 already deleted, retry=false 2024-11-09T06:42:21,572 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e09398052c91,41261,1731134537799 expired; onlineServers=1 2024-11-09T06:42:21,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e09398052c91,44035,1731134537752 2024-11-09T06:42:21,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T06:42:21,573 INFO [RS:1;e09398052c91:44035 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T06:42:21,574 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e09398052c91,44035,1731134537752] 2024-11-09T06:42:21,576 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e09398052c91,44035,1731134537752 already deleted, retry=false 2024-11-09T06:42:21,576 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e09398052c91,44035,1731134537752 expired; onlineServers=0 2024-11-09T06:42:21,576 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e09398052c91,33407,1731134536965' ***** 2024-11-09T06:42:21,576 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T06:42:21,576 INFO [M:0;e09398052c91:33407 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T06:42:21,576 INFO [M:0;e09398052c91:33407 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T06:42:21,576 DEBUG [M:0;e09398052c91:33407 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T06:42:21,577 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
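Editor's note: the NodeDeleted and NodeChildrenChanged events on /hbase/rs above are how the master's RegionServerTracker learns that a regionserver's ephemeral znode has gone away and then processes its expiration. The sketch below is a minimal stand-alone illustration of that ZooKeeper watch pattern, not HBase's actual tracker code; the quorum address and base path are taken from the ZKWatcher lines in this log.

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the ZKWatcher lines above; ephemeral znodes under /hbase/rs
    // represent live regionservers.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60610", 30_000, event -> { });
    Watcher childWatcher = (WatchedEvent event) -> {
      // Fires once when the children of /hbase/rs change, e.g. when a
      // regionserver's ephemeral znode is deleted on shutdown.
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        System.out.println("children of " + event.getPath() + " changed");
      }
    };
    List<String> liveServers = zk.getChildren("/hbase/rs", childWatcher);
    System.out.println("live regionservers: " + liveServers);
    Thread.sleep(60_000); // keep the session open long enough to observe an event
    zk.close();
  }
}
```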
2024-11-09T06:42:21,577 DEBUG [master/e09398052c91:0:becomeActiveMaster-HFileCleaner.large.0-1731134538802 {}] cleaner.HFileCleaner(306): Exit Thread[master/e09398052c91:0:becomeActiveMaster-HFileCleaner.large.0-1731134538802,5,FailOnTimeoutGroup] 2024-11-09T06:42:21,577 DEBUG [M:0;e09398052c91:33407 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T06:42:21,577 DEBUG [master/e09398052c91:0:becomeActiveMaster-HFileCleaner.small.0-1731134538810 {}] cleaner.HFileCleaner(306): Exit Thread[master/e09398052c91:0:becomeActiveMaster-HFileCleaner.small.0-1731134538810,5,FailOnTimeoutGroup] 2024-11-09T06:42:21,577 INFO [M:0;e09398052c91:33407 {}] hbase.ChoreService(370): Chore service for: master/e09398052c91:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T06:42:21,577 INFO [M:0;e09398052c91:33407 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T06:42:21,577 DEBUG [M:0;e09398052c91:33407 {}] master.HMaster(1795): Stopping service threads 2024-11-09T06:42:21,577 INFO [M:0;e09398052c91:33407 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T06:42:21,577 INFO [M:0;e09398052c91:33407 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T06:42:21,578 INFO [M:0;e09398052c91:33407 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T06:42:21,578 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-09T06:42:21,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T06:42:21,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:21,579 DEBUG [M:0;e09398052c91:33407 {}] zookeeper.ZKUtil(347): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T06:42:21,579 WARN [M:0;e09398052c91:33407 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T06:42:21,580 INFO [M:0;e09398052c91:33407 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/.lastflushedseqids 2024-11-09T06:42:21,589 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,589 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T06:42:21,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:34852 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:34195:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34852 dst: /127.0.0.1:34195 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:21,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-09T06:42:21,596 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T06:42:21,596 INFO [M:0;e09398052c91:33407 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T06:42:21,596 INFO [M:0;e09398052c91:33407 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T06:42:21,596 DEBUG [M:0;e09398052c91:33407 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T06:42:21,596 INFO [M:0;e09398052c91:33407 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:21,596 DEBUG [M:0;e09398052c91:33407 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:21,596 DEBUG [M:0;e09398052c91:33407 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T06:42:21,596 DEBUG [M:0;e09398052c91:33407 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T06:42:21,596 INFO [M:0;e09398052c91:33407 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-09T06:42:21,615 DEBUG [M:0;e09398052c91:33407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62e7a6a508bd4b42960b9ecf50e291a1 is 82, key is hbase:meta,,1/info:regioninfo/1731134539669/Put/seqid=0 2024-11-09T06:42:21,617 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,617 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,620 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:54380 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:42651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54380 dst: /127.0.0.1:42651 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:21,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-09T06:42:21,624 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T06:42:21,624 INFO [M:0;e09398052c91:33407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62e7a6a508bd4b42960b9ecf50e291a1 2024-11-09T06:42:21,655 DEBUG [M:0;e09398052c91:33407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b95cfe04b5364009960a1e70966776ac is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731134540471/Put/seqid=0 2024-11-09T06:42:21,657 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,657 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,660 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:46970 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:42479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46970 dst: /127.0.0.1:42479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:21,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775552_1037 (size=6440) 2024-11-09T06:42:21,665 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T06:42:21,665 INFO [M:0;e09398052c91:33407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b95cfe04b5364009960a1e70966776ac 2024-11-09T06:42:21,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:21,669 INFO [RS:2;e09398052c91:41261 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T06:42:21,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41261-0x100fb76a49e0003, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:21,669 INFO [RS:2;e09398052c91:41261 {}] regionserver.HRegionServer(1031): Exiting; stopping=e09398052c91,41261,1731134537799; zookeeper connection closed. 2024-11-09T06:42:21,670 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@37813257 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@37813257 2024-11-09T06:42:21,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:21,675 INFO [RS:1;e09398052c91:44035 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T06:42:21,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44035-0x100fb76a49e0002, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:21,675 INFO [RS:1;e09398052c91:44035 {}] regionserver.HRegionServer(1031): Exiting; stopping=e09398052c91,44035,1731134537752; zookeeper connection closed. 2024-11-09T06:42:21,675 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7d2df919 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7d2df919 2024-11-09T06:42:21,676 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T06:42:21,690 DEBUG [M:0;e09398052c91:33407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59b9659738e34a10b8b52f0f4cf49e8c is 69, key is e09398052c91,41261,1731134537799/rs:state/1731134538870/Put/seqid=0 2024-11-09T06:42:21,691 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T06:42:21,691 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T06:42:21,694 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291574702_22 at /127.0.0.1:34878 [Receiving block BP-1470512121-172.17.0.2-1731134533905:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:34195:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34878 dst: /127.0.0.1:34195 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T06:42:21,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-09T06:42:21,698 WARN [M:0;e09398052c91:33407 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T06:42:21,698 INFO [M:0;e09398052c91:33407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59b9659738e34a10b8b52f0f4cf49e8c 2024-11-09T06:42:21,707 DEBUG [M:0;e09398052c91:33407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62e7a6a508bd4b42960b9ecf50e291a1 as hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/62e7a6a508bd4b42960b9ecf50e291a1 2024-11-09T06:42:21,715 INFO [M:0;e09398052c91:33407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/62e7a6a508bd4b42960b9ecf50e291a1, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T06:42:21,717 DEBUG [M:0;e09398052c91:33407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b95cfe04b5364009960a1e70966776ac as hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b95cfe04b5364009960a1e70966776ac 2024-11-09T06:42:21,725 INFO [M:0;e09398052c91:33407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b95cfe04b5364009960a1e70966776ac, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T06:42:21,726 DEBUG [M:0;e09398052c91:33407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59b9659738e34a10b8b52f0f4cf49e8c as hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59b9659738e34a10b8b52f0f4cf49e8c 2024-11-09T06:42:21,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-09T06:42:21,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-09T06:42:21,736 INFO [M:0;e09398052c91:33407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59b9659738e34a10b8b52f0f4cf49e8c, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T06:42:21,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-09T06:42:21,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-09T06:42:21,738 INFO 
[M:0;e09398052c91:33407 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=72, compaction requested=false 2024-11-09T06:42:21,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-09T06:42:21,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-09T06:42:21,741 INFO [M:0;e09398052c91:33407 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:21,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-09T06:42:21,741 DEBUG [M:0;e09398052c91:33407 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731134541596Disabling compacts and flushes for region at 1731134541596Disabling writes for close at 1731134541596Obtaining lock to block concurrent updates at 1731134541596Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731134541596Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731134541597 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731134541598 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731134541598Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731134541614 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731134541614Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731134541632 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731134541654 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731134541655 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731134541673 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731134541689 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731134541689Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d441a82: reopening flushed file at 1731134541706 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a3986ab: reopening flushed file at 1731134541715 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d24bea5: reopening flushed file at 1731134541725 (+10 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=72, compaction requested=false at 1731134541738 (+13 ms)Writing region close event to WAL at 1731134541741 (+3 ms)Closed at 1731134541741 2024-11-09T06:42:21,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-09T06:42:21,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741825_1011 (size=32686) 2024-11-09T06:42:21,744 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_1073741825_1011 (size=32686) 2024-11-09T06:42:21,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_1073741825_1011 (size=32686) 2024-11-09T06:42:21,746 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T06:42:21,746 INFO [M:0;e09398052c91:33407 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-09T06:42:21,746 INFO [M:0;e09398052c91:33407 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33407 2024-11-09T06:42:21,746 INFO [M:0;e09398052c91:33407 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T06:42:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34195 is added to blk_-9223372036854775693_1015 (size=32) 2024-11-09T06:42:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42651 is added to blk_-9223372036854775692_1015 (size=32) 2024-11-09T06:42:21,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:21,848 INFO [M:0;e09398052c91:33407 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T06:42:21,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33407-0x100fb76a49e0000, quorum=127.0.0.1:60610, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:21,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@653e6301{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:21,855 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@404caff2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T06:42:21,855 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T06:42:21,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T06:42:21,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir/,STOPPED} 2024-11-09T06:42:21,857 WARN [BP-1470512121-172.17.0.2-1731134533905 heartbeating to localhost/127.0.0.1:34217 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T06:42:21,857 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T06:42:21,857 WARN [BP-1470512121-172.17.0.2-1731134533905 heartbeating to localhost/127.0.0.1:34217 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1470512121-172.17.0.2-1731134533905 (Datanode Uuid af188928-3c7c-410c-a532-38d36424e839) service to localhost/127.0.0.1:34217 2024-11-09T06:42:21,857 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T06:42:21,859 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data5/current/BP-1470512121-172.17.0.2-1731134533905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:21,859 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data6/current/BP-1470512121-172.17.0.2-1731134533905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:21,859 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T06:42:21,861 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@513cab2c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:21,862 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T06:42:21,862 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T06:42:21,862 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T06:42:21,862 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir/,STOPPED} 2024-11-09T06:42:21,863 WARN [BP-1470512121-172.17.0.2-1731134533905 heartbeating to localhost/127.0.0.1:34217 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T06:42:21,863 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T06:42:21,863 WARN [BP-1470512121-172.17.0.2-1731134533905 heartbeating to localhost/127.0.0.1:34217 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1470512121-172.17.0.2-1731134533905 (Datanode Uuid aa781535-0273-45ba-857a-580b3a60ac8b) service to localhost/127.0.0.1:34217 2024-11-09T06:42:21,863 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T06:42:21,864 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data3/current/BP-1470512121-172.17.0.2-1731134533905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:21,864 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data4/current/BP-1470512121-172.17.0.2-1731134533905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:21,864 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T06:42:21,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65462677{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:21,869 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T06:42:21,869 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T06:42:21,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T06:42:21,870 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir/,STOPPED} 2024-11-09T06:42:21,871 WARN [BP-1470512121-172.17.0.2-1731134533905 heartbeating to localhost/127.0.0.1:34217 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T06:42:21,871 WARN [BP-1470512121-172.17.0.2-1731134533905 heartbeating to localhost/127.0.0.1:34217 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1470512121-172.17.0.2-1731134533905 (Datanode Uuid cb3436cc-d260-491e-b58a-699d1e26543e) service to localhost/127.0.0.1:34217 2024-11-09T06:42:21,871 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T06:42:21,871 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T06:42:21,871 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data1/current/BP-1470512121-172.17.0.2-1731134533905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:21,872 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/cluster_125b990d-e8cf-1339-872f-1ca2fe15962e/data/data2/current/BP-1470512121-172.17.0.2-1731134533905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:21,872 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T06:42:21,880 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58dbf239{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T06:42:21,881 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T06:42:21,881 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T06:42:21,881 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T06:42:21,881 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir/,STOPPED} 2024-11-09T06:42:21,890 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-09T06:42:21,922 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-09T06:42:21,929 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 158), OpenFileDescriptor=441 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=38 (was 24) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2633 (was 2897) 2024-11-09T06:42:21,935 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=441, MaxFileDescriptor=1048576, SystemLoadAverage=38, ProcessCount=11, AvailableMemoryMB=2633 2024-11-09T06:42:21,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T06:42:21,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.log.dir so I do NOT create it in target/test-data/1545262f-be67-2a37-a19b-17630f873904 2024-11-09T06:42:21,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad8cd752-8b44-b08d-c9e1-41f5afb4d30a/hadoop.tmp.dir so I do NOT create it in target/test-data/1545262f-be67-2a37-a19b-17630f873904 2024-11-09T06:42:21,936 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d, deleteOnExit=true 2024-11-09T06:42:21,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-09T06:42:21,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/test.cache.data in system properties and HBase conf 2024-11-09T06:42:21,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T06:42:21,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.log.dir in system properties and HBase conf 2024-11-09T06:42:21,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T06:42:21,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T06:42:21,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T06:42:21,937 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T06:42:21,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T06:42:21,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/nfs.dump.dir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/java.io.tmpdir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T06:42:21,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T06:42:22,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:22,038 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T06:42:22,039 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T06:42:22,039 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T06:42:22,039 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T06:42:22,040 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:22,041 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58cabbb9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.log.dir/,AVAILABLE} 2024-11-09T06:42:22,041 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@467f22c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T06:42:22,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3977a7da{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/java.io.tmpdir/jetty-localhost-40817-hadoop-hdfs-3_4_1-tests_jar-_-any-1770152234811422484/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T06:42:22,157 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7094f416{HTTP/1.1, (http/1.1)}{localhost:40817} 2024-11-09T06:42:22,157 INFO [Time-limited test {}] server.Server(415): Started @10140ms 2024-11-09T06:42:22,246 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:22,250 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T06:42:22,251 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T06:42:22,251 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T06:42:22,251 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T06:42:22,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dae6551{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.log.dir/,AVAILABLE} 2024-11-09T06:42:22,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23b354d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T06:42:22,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@377873f6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/java.io.tmpdir/jetty-localhost-40987-hadoop-hdfs-3_4_1-tests_jar-_-any-10500305972928428565/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:22,368 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@1d2295b8{HTTP/1.1, (http/1.1)}{localhost:40987} 2024-11-09T06:42:22,368 INFO [Time-limited test {}] server.Server(415): Started @10351ms 2024-11-09T06:42:22,369 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T06:42:22,407 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:22,411 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T06:42:22,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T06:42:22,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T06:42:22,411 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T06:42:22,412 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c077530{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.log.dir/,AVAILABLE} 2024-11-09T06:42:22,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63d788f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T06:42:22,481 WARN [Thread-528 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data2/current/BP-358983239-172.17.0.2-1731134541970/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:22,481 WARN [Thread-527 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data1/current/BP-358983239-172.17.0.2-1731134541970/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:22,502 WARN [Thread-506 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T06:42:22,506 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4e182539e7e5ee1 with lease ID 0x4188b64fdd1ee570: Processing first storage report for DS-4ee2f4d3-2645-41a3-bbdb-05767f20dfe0 from datanode DatanodeRegistration(127.0.0.1:36351, datanodeUuid=f5744ac6-c1b8-48fc-b937-aab444d5211d, infoPort=42245, infoSecurePort=0, ipcPort=39551, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970) 2024-11-09T06:42:22,506 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4e182539e7e5ee1 with lease ID 0x4188b64fdd1ee570: from storage DS-4ee2f4d3-2645-41a3-bbdb-05767f20dfe0 node DatanodeRegistration(127.0.0.1:36351, datanodeUuid=f5744ac6-c1b8-48fc-b937-aab444d5211d, infoPort=42245, infoSecurePort=0, ipcPort=39551, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T06:42:22,506 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4e182539e7e5ee1 with lease ID 0x4188b64fdd1ee570: Processing first storage report for DS-e856b147-11b4-45e8-a527-d5aff5782b59 from datanode DatanodeRegistration(127.0.0.1:36351, datanodeUuid=f5744ac6-c1b8-48fc-b937-aab444d5211d, infoPort=42245, infoSecurePort=0, ipcPort=39551, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970) 2024-11-09T06:42:22,506 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4e182539e7e5ee1 with lease ID 0x4188b64fdd1ee570: from storage DS-e856b147-11b4-45e8-a527-d5aff5782b59 node DatanodeRegistration(127.0.0.1:36351, datanodeUuid=f5744ac6-c1b8-48fc-b937-aab444d5211d, infoPort=42245, infoSecurePort=0, ipcPort=39551, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T06:42:22,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@191f750f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/java.io.tmpdir/jetty-localhost-46643-hadoop-hdfs-3_4_1-tests_jar-_-any-3564800080692360060/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:22,545 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6811044f{HTTP/1.1, (http/1.1)}{localhost:46643} 2024-11-09T06:42:22,545 INFO [Time-limited test {}] server.Server(415): Started @10528ms 2024-11-09T06:42:22,547 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T06:42:22,583 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T06:42:22,586 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T06:42:22,587 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T06:42:22,587 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T06:42:22,587 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T06:42:22,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b15d8c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.log.dir/,AVAILABLE} 2024-11-09T06:42:22,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@427b8cb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T06:42:22,650 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data4/current/BP-358983239-172.17.0.2-1731134541970/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:22,650 WARN [Thread-562 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data3/current/BP-358983239-172.17.0.2-1731134541970/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:22,667 WARN [Thread-542 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T06:42:22,670 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78762a79c37cc with lease ID 0x4188b64fdd1ee571: Processing first storage report for DS-2437dc7b-fcce-47d7-acfa-fde6ef5c3c12 from datanode DatanodeRegistration(127.0.0.1:41459, datanodeUuid=3aa875f0-3d5a-4219-9d51-9a0eefe11d00, infoPort=36247, infoSecurePort=0, ipcPort=36569, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970) 2024-11-09T06:42:22,671 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x78762a79c37cc with lease ID 0x4188b64fdd1ee571: from storage DS-2437dc7b-fcce-47d7-acfa-fde6ef5c3c12 node DatanodeRegistration(127.0.0.1:41459, datanodeUuid=3aa875f0-3d5a-4219-9d51-9a0eefe11d00, infoPort=36247, infoSecurePort=0, ipcPort=36569, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T06:42:22,671 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78762a79c37cc with lease ID 0x4188b64fdd1ee571: Processing first storage report for DS-3bab2d54-a8a6-4dd0-b786-8c81f770cf9e from datanode DatanodeRegistration(127.0.0.1:41459, datanodeUuid=3aa875f0-3d5a-4219-9d51-9a0eefe11d00, infoPort=36247, infoSecurePort=0, ipcPort=36569, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970) 2024-11-09T06:42:22,671 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x78762a79c37cc with lease ID 0x4188b64fdd1ee571: from storage DS-3bab2d54-a8a6-4dd0-b786-8c81f770cf9e node DatanodeRegistration(127.0.0.1:41459, datanodeUuid=3aa875f0-3d5a-4219-9d51-9a0eefe11d00, infoPort=36247, infoSecurePort=0, ipcPort=36569, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T06:42:22,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3364f2e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/java.io.tmpdir/jetty-localhost-46351-hadoop-hdfs-3_4_1-tests_jar-_-any-10377424310466443217/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:22,708 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f76b201{HTTP/1.1, (http/1.1)}{localhost:46351} 2024-11-09T06:42:22,709 INFO [Time-limited test {}] server.Server(415): Started @10691ms 2024-11-09T06:42:22,711 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
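The "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1, ...}" entry above is what drives the DFS startup recorded here: three datanodes come up, register, and send their first block reports. A sketch of how a test typically requests that layout, assuming the public StartMiniClusterOption builder; the builder method names are an assumption, not quoted from this run:

    // Sketch only: how a test typically asks for the 1-master / 3-regionserver / 3-datanode
    // shape reported above. Not copied from TestHBaseWalOnEC.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)     // the three datanodes whose block reports appear above
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);
        util.shutdownMiniCluster();
      }
    }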
2024-11-09T06:42:22,817 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data5/current/BP-358983239-172.17.0.2-1731134541970/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:22,817 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data6/current/BP-358983239-172.17.0.2-1731134541970/current, will proceed with Du for space computation calculation, 2024-11-09T06:42:22,840 WARN [Thread-577 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T06:42:22,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf497a25af55634e7 with lease ID 0x4188b64fdd1ee572: Processing first storage report for DS-163c9eb7-656a-42c8-aad6-288cfbc4a9dc from datanode DatanodeRegistration(127.0.0.1:40843, datanodeUuid=9396e4c7-a583-4715-abc7-a03a3609cda7, infoPort=40087, infoSecurePort=0, ipcPort=39701, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970) 2024-11-09T06:42:22,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf497a25af55634e7 with lease ID 0x4188b64fdd1ee572: from storage DS-163c9eb7-656a-42c8-aad6-288cfbc4a9dc node DatanodeRegistration(127.0.0.1:40843, datanodeUuid=9396e4c7-a583-4715-abc7-a03a3609cda7, infoPort=40087, infoSecurePort=0, ipcPort=39701, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T06:42:22,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf497a25af55634e7 with lease ID 0x4188b64fdd1ee572: Processing first storage report for DS-b98db2d0-1170-4cc7-93ae-ca612dc816c2 from datanode DatanodeRegistration(127.0.0.1:40843, datanodeUuid=9396e4c7-a583-4715-abc7-a03a3609cda7, infoPort=40087, infoSecurePort=0, ipcPort=39701, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970) 2024-11-09T06:42:22,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf497a25af55634e7 with lease ID 0x4188b64fdd1ee572: from storage DS-b98db2d0-1170-4cc7-93ae-ca612dc816c2 node DatanodeRegistration(127.0.0.1:40843, datanodeUuid=9396e4c7-a583-4715-abc7-a03a3609cda7, infoPort=40087, infoSecurePort=0, ipcPort=39701, storageInfo=lv=-57;cid=testClusterID;nsid=264531612;c=1731134541970), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T06:42:22,943 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904 2024-11-09T06:42:22,946 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/zookeeper_0, clientPort=62482, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T06:42:22,947 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62482 2024-11-09T06:42:22,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:22,949 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741825_1001 (size=7) 2024-11-09T06:42:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741825_1001 (size=7) 2024-11-09T06:42:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741825_1001 (size=7) 2024-11-09T06:42:22,963 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e with version=8 2024-11-09T06:42:22,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34217/user/jenkins/test-data/a2389093-6748-def1-315d-6070dd88060a/hbase-staging 2024-11-09T06:42:22,966 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e09398052c91:0 server-side Connection retries=45 2024-11-09T06:42:22,966 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:22,966 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:22,966 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T06:42:22,966 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:22,966 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T06:42:22,966 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T06:42:22,966 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T06:42:22,967 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33111 2024-11-09T06:42:22,968 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33111 connecting to ZooKeeper ensemble=127.0.0.1:62482 2024-11-09T06:42:22,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331110x0, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T06:42:22,977 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33111-0x100fb76bef10000 connected 2024-11-09T06:42:22,993 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:22,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:22,996 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:22,997 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e, hbase.cluster.distributed=false 2024-11-09T06:42:22,998 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T06:42:22,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33111 2024-11-09T06:42:22,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33111 2024-11-09T06:42:22,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33111 2024-11-09T06:42:22,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33111 2024-11-09T06:42:22,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33111 2024-11-09T06:42:23,015 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e09398052c91:0 server-side Connection retries=45 2024-11-09T06:42:23,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:23,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:23,015 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T06:42:23,016 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:23,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T06:42:23,016 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T06:42:23,016 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T06:42:23,016 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34377 2024-11-09T06:42:23,018 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34377 connecting to ZooKeeper ensemble=127.0.0.1:62482 2024-11-09T06:42:23,019 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:23,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:23,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343770x0, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T06:42:23,025 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34377-0x100fb76bef10001 connected 2024-11-09T06:42:23,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:23,025 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T06:42:23,026 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T06:42:23,027 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T06:42:23,028 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T06:42:23,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34377 2024-11-09T06:42:23,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34377 2024-11-09T06:42:23,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34377 2024-11-09T06:42:23,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34377 2024-11-09T06:42:23,030 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34377 2024-11-09T06:42:23,045 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e09398052c91:0 server-side Connection retries=45 2024-11-09T06:42:23,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:23,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:23,045 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T06:42:23,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:23,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T06:42:23,045 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T06:42:23,045 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T06:42:23,046 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46343 2024-11-09T06:42:23,047 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46343 connecting to ZooKeeper ensemble=127.0.0.1:62482 2024-11-09T06:42:23,048 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:23,049 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:23,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463430x0, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T06:42:23,054 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:463430x0, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:23,054 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46343-0x100fb76bef10002 connected 2024-11-09T06:42:23,055 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T06:42:23,055 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T06:42:23,056 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
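The ipc.RpcExecutor and RWQueueRpcExecutor entries above (handlerCount=3, writeQueues=1 writeHandlers=1, readQueues=1 readHandlers=2, ...) reflect the RPC scheduler sizing that comes from standard HBase configuration keys. A sketch with illustrative values, assuming the usual keys rather than whatever the test harness sets internally for this run:

    // Illustrative only; the values below are assumptions, not the ones this run used.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcExecutorConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);               // handler pool size
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 1.0f); // call queues per handler group
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);     // split read vs. write handlers
        System.out.println(conf.get("hbase.regionserver.handler.count"));
      }
    }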
2024-11-09T06:42:23,057 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T06:42:23,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46343 2024-11-09T06:42:23,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46343 2024-11-09T06:42:23,059 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46343 2024-11-09T06:42:23,059 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46343 2024-11-09T06:42:23,059 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46343 2024-11-09T06:42:23,074 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e09398052c91:0 server-side Connection retries=45 2024-11-09T06:42:23,074 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:23,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:23,075 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T06:42:23,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T06:42:23,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T06:42:23,075 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T06:42:23,075 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T06:42:23,076 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44805 2024-11-09T06:42:23,077 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44805 connecting to ZooKeeper ensemble=127.0.0.1:62482 2024-11-09T06:42:23,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:23,079 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:23,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448050x0, quorum=127.0.0.1:62482, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T06:42:23,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44805-0x100fb76bef10003 connected 2024-11-09T06:42:23,083 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:23,084 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T06:42:23,084 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T06:42:23,085 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T06:42:23,086 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T06:42:23,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44805 2024-11-09T06:42:23,087 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44805 2024-11-09T06:42:23,087 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44805 2024-11-09T06:42:23,087 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44805 2024-11-09T06:42:23,087 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44805 2024-11-09T06:42:23,098 DEBUG [M:0;e09398052c91:33111 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e09398052c91:33111 2024-11-09T06:42:23,099 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e09398052c91,33111,1731134542965 2024-11-09T06:42:23,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:23,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:23,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:23,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:23,102 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33111-0x100fb76bef10000, 
quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e09398052c91,33111,1731134542965 2024-11-09T06:42:23,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T06:42:23,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T06:42:23,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T06:42:23,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,105 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T06:42:23,106 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e09398052c91,33111,1731134542965 from backup master directory 2024-11-09T06:42:23,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:23,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:23,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e09398052c91,33111,1731134542965 2024-11-09T06:42:23,107 WARN [master/e09398052c91:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
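The repeated "Set watcher on znode that does not yet exist" lines and the NodeCreated/NodeDeleted events above are the active-master election over /hbase/master and /hbase/backup-masters. The underlying primitive is an existence check that also registers a watch. A sketch using the stock ZooKeeper client rather than HBase's internal ZKWatcher/ZKUtil classes; the connection string reuses the MiniZooKeeperCluster port reported earlier and the session timeout is illustrative:

    // Sketch with the plain ZooKeeper API; not HBase's own watcher plumbing.
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MasterZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62482", 30000,
            event -> System.out.println("ZK event: " + event));
        // exists() registers a watch even when the znode is absent, which is exactly what
        // the "Set watcher on znode that does not yet exist, /hbase/master" lines describe.
        Stat stat = zk.exists("/hbase/master", true);
        System.out.println(stat == null ? "no active master znode yet" : "active master registered");
        zk.close();
      }
    }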
2024-11-09T06:42:23,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:23,107 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e09398052c91,33111,1731134542965 2024-11-09T06:42:23,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T06:42:23,114 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/hbase.id] with ID: af1edeec-714f-4394-ba78-2f6dc023dd9f 2024-11-09T06:42:23,114 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/.tmp/hbase.id 2024-11-09T06:42:23,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741826_1002 (size=42) 2024-11-09T06:42:23,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741826_1002 (size=42) 2024-11-09T06:42:23,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741826_1002 (size=42) 2024-11-09T06:42:23,125 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/.tmp/hbase.id]:[hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/hbase.id] 2024-11-09T06:42:23,141 INFO [master/e09398052c91:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T06:42:23,141 INFO [master/e09398052c91:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T06:42:23,142 INFO [master/e09398052c91:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
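The FSUtils entries above write the cluster ID (af1edeec-714f-4394-ba78-2f6dc023dd9f) to a temporary file under .tmp and then move it into place as hbase.id. The point of the pattern is that a rename on HDFS is atomic, so readers never observe a half-written ID file. A generic sketch of that pattern with the Hadoop FileSystem API; the paths and payload format are illustrative and not the actual FSUtils code:

    // Generic write-to-tmp-then-rename sketch; file names and content are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRenameSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/hbase/.tmp/hbase.id");
        Path target = new Path("/hbase/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeUTF("af1edeec-714f-4394-ba78-2f6dc023dd9f"); // cluster ID from this log
        }
        fs.rename(tmp, target); // atomic on HDFS: the file appears fully written or not at all
      }
    }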
2024-11-09T06:42:23,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741827_1003 (size=196) 2024-11-09T06:42:23,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741827_1003 (size=196) 2024-11-09T06:42:23,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741827_1003 (size=196) 2024-11-09T06:42:23,155 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T06:42:23,156 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T06:42:23,156 INFO [master/e09398052c91:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T06:42:23,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is 
added to blk_1073741828_1004 (size=1189) 2024-11-09T06:42:23,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741828_1004 (size=1189) 2024-11-09T06:42:23,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741828_1004 (size=1189) 2024-11-09T06:42:23,169 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store 2024-11-09T06:42:23,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741829_1005 (size=34) 2024-11-09T06:42:23,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741829_1005 (size=34) 2024-11-09T06:42:23,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741829_1005 (size=34) 2024-11-09T06:42:23,179 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:23,179 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T06:42:23,179 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:23,179 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
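The master:store descriptor above lists four column families (info, proc, rs, state), each with its own versions, block size, bloom filter and encoding settings. A hedged sketch of assembling an equivalent descriptor with the public HBase client builders, showing only the 'info' and 'proc' families with the values printed in the log; the table name is illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BuildStoreLikeDescriptor {
        public static void main(String[] args) {
            // 'info' family: VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks, in-memory
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setBlocksize(8192)
                    .setInMemory(true)
                    .build();

            // 'proc' family: VERSIONS=1, no encoding, ROW bloom, 64 KB blocks
            ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("proc"))
                    .setMaxVersions(1)
                    .setBloomFilterType(BloomType.ROW)
                    .setBlocksize(65536)
                    .build();

            TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("example", "store")) // illustrative name
                    .setColumnFamily(info)
                    .setColumnFamily(proc)
                    .build();
            System.out.println(td);
        }
    }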
2024-11-09T06:42:23,179 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T06:42:23,179 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:23,179 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:23,179 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731134543179Disabling compacts and flushes for region at 1731134543179Disabling writes for close at 1731134543179Writing region close event to WAL at 1731134543179Closed at 1731134543179 2024-11-09T06:42:23,180 WARN [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/.initializing 2024-11-09T06:42:23,180 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/WALs/e09398052c91,33111,1731134542965 2024-11-09T06:42:23,184 INFO [master/e09398052c91:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C33111%2C1731134542965, suffix=, logDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/WALs/e09398052c91,33111,1731134542965, archiveDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/oldWALs, maxLogs=10 2024-11-09T06:42:23,185 INFO [master/e09398052c91:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e09398052c91%2C33111%2C1731134542965.1731134543185 2024-11-09T06:42:23,195 INFO [master/e09398052c91:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/WALs/e09398052c91,33111,1731134542965/e09398052c91%2C33111%2C1731134542965.1731134543185 2024-11-09T06:42:23,196 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42245:42245),(127.0.0.1/127.0.0.1:36247:36247),(127.0.0.1/127.0.0.1:40087:40087)] 2024-11-09T06:42:23,197 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T06:42:23,197 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:23,197 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,197 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,199 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T06:42:23,201 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:23,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,203 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T06:42:23,203 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T06:42:23,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,206 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T06:42:23,207 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T06:42:23,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T06:42:23,209 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T06:42:23,210 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,211 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,211 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,213 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,213 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,213 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T06:42:23,215 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T06:42:23,217 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T06:42:23,218 INFO [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61990991, jitterRate=-0.07626225054264069}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T06:42:23,219 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731134543197Initializing all the Stores at 1731134543199 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134543199Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134543199Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134543199Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134543199Cleaning up temporary data from old regions at 1731134543213 (+14 ms)Region opened successfully at 1731134543219 (+6 ms) 2024-11-09T06:42:23,219 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T06:42:23,223 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e11c4c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e09398052c91/172.17.0.2:0 2024-11-09T06:42:23,224 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T06:42:23,224 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T06:42:23,224 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T06:42:23,224 INFO [master/e09398052c91:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T06:42:23,225 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-09T06:42:23,225 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-09T06:42:23,225 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T06:42:23,228 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
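FlushLargeStoresPolicy above falls back to the memstore flush size divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: the 134217728-byte flush size spread over the four families of master:store gives the 33554432-byte (32 MB) lower bound that appears in the region-open line. A tiny arithmetic sketch of that fallback, using the values from the log:

    public class FlushLowerBound {
        public static void main(String[] args) {
            long memstoreFlushSize = 134_217_728L; // flushSize logged by MasterRegionFlusherAndCompactor
            int columnFamilies = 4;                // info, proc, rs, state
            long lowerBound = memstoreFlushSize / columnFamilies;
            System.out.println(lowerBound);        // 33554432, i.e. the 32 MB fallback in the log
        }
    }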
2024-11-09T06:42:23,228 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T06:42:23,230 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T06:42:23,231 INFO [master/e09398052c91:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T06:42:23,231 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T06:42:23,232 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T06:42:23,233 INFO [master/e09398052c91:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T06:42:23,234 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T06:42:23,235 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T06:42:23,235 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T06:42:23,237 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T06:42:23,239 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T06:42:23,241 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T06:42:23,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:23,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:23,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:23,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-09T06:42:23,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:23,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,243 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e09398052c91,33111,1731134542965, sessionid=0x100fb76bef10000, setting cluster-up flag (Was=false) 2024-11-09T06:42:23,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,251 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T06:42:23,253 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e09398052c91,33111,1731134542965 2024-11-09T06:42:23,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,262 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T06:42:23,263 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e09398052c91,33111,1731134542965 2024-11-09T06:42:23,265 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T06:42:23,267 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T06:42:23,268 INFO [master/e09398052c91:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T06:42:23,268 INFO [master/e09398052c91:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
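The StochasticLoadBalancer line above echoes its tuning knobs (maxSteps, stepsPerRegion, maxRunningTime) as loaded from configuration, and BaseLoadBalancer reports slop=0.2. A hedged sketch of setting the same values programmatically; the hbase.master.balancer.stochastic.* property names are not printed in the log itself and should be treated as assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key names for the values echoed in the balancer log line above.
            conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            conf.setFloat("hbase.regions.slop", 0.2f); // "slop=0.2" from BaseLoadBalancer
            System.out.println(conf.get("hbase.regions.slop"));
        }
    }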
2024-11-09T06:42:23,268 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e09398052c91,33111,1731134542965 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T06:42:23,269 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e09398052c91:0, corePoolSize=5, maxPoolSize=5 2024-11-09T06:42:23,270 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e09398052c91:0, corePoolSize=5, maxPoolSize=5 2024-11-09T06:42:23,270 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e09398052c91:0, corePoolSize=5, maxPoolSize=5 2024-11-09T06:42:23,270 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e09398052c91:0, corePoolSize=5, maxPoolSize=5 2024-11-09T06:42:23,270 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e09398052c91:0, corePoolSize=10, maxPoolSize=10 2024-11-09T06:42:23,270 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,270 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e09398052c91:0, corePoolSize=2, maxPoolSize=2 2024-11-09T06:42:23,270 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,271 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731134573271 2024-11-09T06:42:23,271 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T06:42:23,271 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T06:42:23,271 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T06:42:23,271 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T06:42:23,271 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T06:42:23,271 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T06:42:23,271 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,272 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T06:42:23,272 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T06:42:23,272 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T06:42:23,272 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T06:42:23,272 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T06:42:23,272 INFO [master/e09398052c91:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T06:42:23,272 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T06:42:23,273 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e09398052c91:0:becomeActiveMaster-HFileCleaner.large.0-1731134543272,5,FailOnTimeoutGroup] 2024-11-09T06:42:23,273 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e09398052c91:0:becomeActiveMaster-HFileCleaner.small.0-1731134543273,5,FailOnTimeoutGroup] 2024-11-09T06:42:23,273 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,273 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T06:42:23,273 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,273 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
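HMaster reports above that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a threshold greater than 0. A minimal sketch of enabling it; the key name is copied verbatim from the log line, the threshold value is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableRefCountRecovery {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Key copied from the HMaster log line; any value > 0 enables the recovery chore.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3); // illustrative threshold
            System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }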
2024-11-09T06:42:23,274 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,274 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T06:42:23,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741831_1007 (size=1321) 2024-11-09T06:42:23,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741831_1007 (size=1321) 2024-11-09T06:42:23,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741831_1007 (size=1321) 2024-11-09T06:42:23,285 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T06:42:23,285 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e 2024-11-09T06:42:23,290 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(746): ClusterId : af1edeec-714f-4394-ba78-2f6dc023dd9f 2024-11-09T06:42:23,290 INFO [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(746): ClusterId : af1edeec-714f-4394-ba78-2f6dc023dd9f 2024-11-09T06:42:23,290 DEBUG [RS:1;e09398052c91:46343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T06:42:23,290 DEBUG [RS:2;e09398052c91:44805 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T06:42:23,290 INFO [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(746): ClusterId : af1edeec-714f-4394-ba78-2f6dc023dd9f 2024-11-09T06:42:23,290 DEBUG [RS:0;e09398052c91:34377 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T06:42:23,293 DEBUG [RS:1;e09398052c91:46343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T06:42:23,293 DEBUG [RS:1;e09398052c91:46343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T06:42:23,294 DEBUG [RS:2;e09398052c91:44805 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T06:42:23,294 DEBUG [RS:2;e09398052c91:44805 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T06:42:23,294 DEBUG [RS:0;e09398052c91:34377 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T06:42:23,294 DEBUG [RS:0;e09398052c91:34377 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T06:42:23,296 DEBUG [RS:1;e09398052c91:46343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T06:42:23,297 DEBUG [RS:1;e09398052c91:46343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7344d9f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e09398052c91/172.17.0.2:0 2024-11-09T06:42:23,297 DEBUG [RS:2;e09398052c91:44805 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T06:42:23,297 DEBUG [RS:0;e09398052c91:34377 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T06:42:23,297 DEBUG [RS:2;e09398052c91:44805 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7edce4c2, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e09398052c91/172.17.0.2:0 2024-11-09T06:42:23,297 DEBUG [RS:0;e09398052c91:34377 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78f262c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e09398052c91/172.17.0.2:0 2024-11-09T06:42:23,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741832_1008 (size=32) 2024-11-09T06:42:23,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741832_1008 (size=32) 2024-11-09T06:42:23,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741832_1008 (size=32) 2024-11-09T06:42:23,305 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:23,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T06:42:23,308 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T06:42:23,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:23,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T06:42:23,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 
0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T06:42:23,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:23,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T06:42:23,312 DEBUG [RS:0;e09398052c91:34377 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e09398052c91:34377 2024-11-09T06:42:23,312 DEBUG [RS:1;e09398052c91:46343 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e09398052c91:46343 2024-11-09T06:42:23,312 INFO [RS:0;e09398052c91:34377 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T06:42:23,312 INFO [RS:0;e09398052c91:34377 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T06:42:23,312 INFO [RS:1;e09398052c91:46343 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T06:42:23,312 DEBUG [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T06:42:23,312 INFO [RS:1;e09398052c91:46343 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T06:42:23,312 DEBUG [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-09T06:42:23,313 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(2659): reportForDuty to master=e09398052c91,33111,1731134542965 with port=46343, startcode=1731134543044 2024-11-09T06:42:23,313 INFO [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(2659): reportForDuty to master=e09398052c91,33111,1731134542965 with port=34377, startcode=1731134543015 2024-11-09T06:42:23,314 DEBUG [RS:1;e09398052c91:46343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T06:42:23,314 DEBUG [RS:0;e09398052c91:34377 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T06:42:23,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T06:42:23,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:23,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T06:42:23,315 DEBUG [RS:2;e09398052c91:44805 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;e09398052c91:44805 2024-11-09T06:42:23,315 INFO [RS:2;e09398052c91:44805 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T06:42:23,315 INFO [RS:2;e09398052c91:44805 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T06:42:23,315 DEBUG [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(832): About to register with Master. 
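The region servers above report for duty to the active master; a client locates the same cluster through the ZooKeeper quorum logged earlier (127.0.0.1:62482). A minimal client-side sketch pointing an HBase connection at that quorum; this is illustrative only and not part of the test output:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ConnectToMiniCluster {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // quorum host from the log
            conf.set("hbase.zookeeper.property.clientPort", "62482"); // client port from the log
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Prints the active master, e.g. e09398052c91,33111,1731134542965 in this run.
                System.out.println(admin.getClusterMetrics().getMasterName());
            }
        }
    }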
2024-11-09T06:42:23,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T06:42:23,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,316 INFO [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(2659): reportForDuty to master=e09398052c91,33111,1731134542965 with port=44805, startcode=1731134543074 2024-11-09T06:42:23,317 DEBUG [RS:2;e09398052c91:44805 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T06:42:23,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:23,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T06:42:23,318 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37811, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T06:42:23,318 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740 2024-11-09T06:42:23,319 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33111 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e09398052c91,46343,1731134543044 2024-11-09T06:42:23,319 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33111 {}] master.ServerManager(517): Registering regionserver=e09398052c91,46343,1731134543044 2024-11-09T06:42:23,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740 2024-11-09T06:42:23,319 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58747, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T06:42:23,320 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51005, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T06:42:23,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T06:42:23,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T06:42:23,322 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33111 {}] 
master.ServerManager(363): Checking decommissioned status of RegionServer e09398052c91,34377,1731134543015 2024-11-09T06:42:23,322 DEBUG [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e 2024-11-09T06:42:23,322 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33111 {}] master.ServerManager(517): Registering regionserver=e09398052c91,34377,1731134543015 2024-11-09T06:42:23,322 DEBUG [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42727 2024-11-09T06:42:23,322 DEBUG [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T06:42:23,322 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T06:42:23,324 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T06:42:23,324 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33111 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e09398052c91,44805,1731134543074 2024-11-09T06:42:23,324 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33111 {}] master.ServerManager(517): Registering regionserver=e09398052c91,44805,1731134543074 2024-11-09T06:42:23,324 DEBUG [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e 2024-11-09T06:42:23,324 DEBUG [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42727 2024-11-09T06:42:23,324 DEBUG [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T06:42:23,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T06:42:23,327 DEBUG [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e 2024-11-09T06:42:23,327 DEBUG [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42727 2024-11-09T06:42:23,327 DEBUG [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T06:42:23,327 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T06:42:23,328 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62073113, jitterRate=-0.07503853738307953}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T06:42:23,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731134543305Initializing all the Stores at 
1731134543306 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134543307 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134543307Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134543307Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134543307Cleaning up temporary data from old regions at 1731134543321 (+14 ms)Region opened successfully at 1731134543329 (+8 ms) 2024-11-09T06:42:23,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T06:42:23,329 DEBUG [RS:1;e09398052c91:46343 {}] zookeeper.ZKUtil(111): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e09398052c91,46343,1731134543044 2024-11-09T06:42:23,329 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T06:42:23,329 WARN [RS:1;e09398052c91:46343 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T06:42:23,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T06:42:23,329 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e09398052c91,34377,1731134543015] 2024-11-09T06:42:23,329 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e09398052c91,46343,1731134543044] 2024-11-09T06:42:23,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T06:42:23,329 INFO [RS:1;e09398052c91:46343 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T06:42:23,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T06:42:23,329 DEBUG [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,46343,1731134543044 2024-11-09T06:42:23,330 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T06:42:23,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T06:42:23,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731134543329Disabling compacts and flushes for region at 1731134543329Disabling writes for close at 1731134543329Writing region close event to WAL at 1731134543330 (+1 ms)Closed at 1731134543330 2024-11-09T06:42:23,330 DEBUG [RS:0;e09398052c91:34377 {}] zookeeper.ZKUtil(111): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e09398052c91,34377,1731134543015 2024-11-09T06:42:23,330 WARN [RS:0;e09398052c91:34377 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T06:42:23,330 INFO [RS:0;e09398052c91:34377 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T06:42:23,330 DEBUG [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,34377,1731134543015 2024-11-09T06:42:23,331 DEBUG [RS:2;e09398052c91:44805 {}] zookeeper.ZKUtil(111): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e09398052c91,44805,1731134543074 2024-11-09T06:42:23,331 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e09398052c91,44805,1731134543074] 2024-11-09T06:42:23,331 WARN [RS:2;e09398052c91:44805 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T06:42:23,331 INFO [RS:2;e09398052c91:44805 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T06:42:23,331 DEBUG [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,44805,1731134543074 2024-11-09T06:42:23,332 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T06:42:23,332 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T06:42:23,332 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T06:42:23,334 INFO [RS:1;e09398052c91:46343 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T06:42:23,335 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T06:42:23,337 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T06:42:23,338 INFO [RS:1;e09398052c91:46343 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T06:42:23,338 INFO [RS:1;e09398052c91:46343 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T06:42:23,338 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,338 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T06:42:23,340 INFO [RS:1;e09398052c91:46343 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T06:42:23,340 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:23,340 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,340 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,340 INFO [RS:0;e09398052c91:34377 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T06:42:23,340 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,340 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,340 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,340 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e09398052c91:0, corePoolSize=2, maxPoolSize=2 2024-11-09T06:42:23,340 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,340 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,341 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,341 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,341 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,341 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,341 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:23,341 DEBUG [RS:1;e09398052c91:46343 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:23,341 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,341 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,341 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:23,342 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,342 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,342 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,46343,1731134543044-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T06:42:23,342 INFO [RS:2;e09398052c91:44805 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T06:42:23,343 INFO [RS:0;e09398052c91:34377 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T06:42:23,344 INFO [RS:0;e09398052c91:34377 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T06:42:23,345 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,345 INFO [RS:2;e09398052c91:44805 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T06:42:23,345 INFO [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T06:42:23,346 INFO [RS:2;e09398052c91:44805 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T06:42:23,346 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,346 INFO [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T06:42:23,347 INFO [RS:0;e09398052c91:34377 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T06:42:23,347 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:23,347 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,347 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,347 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,347 INFO [RS:2;e09398052c91:44805 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T06:42:23,347 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,347 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,348 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e09398052c91:0, corePoolSize=2, maxPoolSize=2 2024-11-09T06:42:23,348 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e09398052c91:0, corePoolSize=2, maxPoolSize=2 2024-11-09T06:42:23,348 DEBUG [RS:0;e09398052c91:34377 {}] 
executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,348 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,349 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:23,349 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,349 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,349 DEBUG [RS:0;e09398052c91:34377 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:23,349 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,349 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e09398052c91:0, corePoolSize=1, maxPoolSize=1 2024-11-09T06:42:23,349 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:23,349 DEBUG [RS:2;e09398052c91:44805 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0, corePoolSize=3, maxPoolSize=3 2024-11-09T06:42:23,351 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,351 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,351 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,351 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,351 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,351 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:23,352 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,352 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,34377,1731134543015-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T06:42:23,352 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,352 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,352 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,352 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,44805,1731134543074-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T06:42:23,361 INFO [RS:1;e09398052c91:46343 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T06:42:23,362 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,46343,1731134543044-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,362 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,362 INFO [RS:1;e09398052c91:46343 {}] regionserver.Replication(171): e09398052c91,46343,1731134543044 started 2024-11-09T06:42:23,375 INFO [RS:0;e09398052c91:34377 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T06:42:23,375 INFO [RS:2;e09398052c91:44805 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T06:42:23,376 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,44805,1731134543074-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,376 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,34377,1731134543015-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,376 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,376 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,376 INFO [RS:2;e09398052c91:44805 {}] regionserver.Replication(171): e09398052c91,44805,1731134543074 started 2024-11-09T06:42:23,376 INFO [RS:0;e09398052c91:34377 {}] regionserver.Replication(171): e09398052c91,34377,1731134543015 started 2024-11-09T06:42:23,377 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:23,377 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(1482): Serving as e09398052c91,46343,1731134543044, RpcServer on e09398052c91/172.17.0.2:46343, sessionid=0x100fb76bef10002 2024-11-09T06:42:23,378 DEBUG [RS:1;e09398052c91:46343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T06:42:23,378 DEBUG [RS:1;e09398052c91:46343 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e09398052c91,46343,1731134543044 2024-11-09T06:42:23,378 DEBUG [RS:1;e09398052c91:46343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,46343,1731134543044' 2024-11-09T06:42:23,378 DEBUG [RS:1;e09398052c91:46343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T06:42:23,378 DEBUG [RS:1;e09398052c91:46343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T06:42:23,379 DEBUG [RS:1;e09398052c91:46343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T06:42:23,379 DEBUG [RS:1;e09398052c91:46343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T06:42:23,379 DEBUG [RS:1;e09398052c91:46343 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e09398052c91,46343,1731134543044 2024-11-09T06:42:23,379 DEBUG [RS:1;e09398052c91:46343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,46343,1731134543044' 2024-11-09T06:42:23,379 DEBUG [RS:1;e09398052c91:46343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T06:42:23,380 DEBUG [RS:1;e09398052c91:46343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T06:42:23,380 DEBUG [RS:1;e09398052c91:46343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T06:42:23,380 INFO [RS:1;e09398052c91:46343 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T06:42:23,380 INFO [RS:1;e09398052c91:46343 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T06:42:23,398 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,398 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T06:42:23,399 INFO [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(1482): Serving as e09398052c91,34377,1731134543015, RpcServer on e09398052c91/172.17.0.2:34377, sessionid=0x100fb76bef10001 2024-11-09T06:42:23,399 INFO [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(1482): Serving as e09398052c91,44805,1731134543074, RpcServer on e09398052c91/172.17.0.2:44805, sessionid=0x100fb76bef10003 2024-11-09T06:42:23,399 DEBUG [RS:2;e09398052c91:44805 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T06:42:23,399 DEBUG [RS:0;e09398052c91:34377 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T06:42:23,399 DEBUG [RS:2;e09398052c91:44805 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e09398052c91,44805,1731134543074 2024-11-09T06:42:23,399 DEBUG [RS:0;e09398052c91:34377 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e09398052c91,34377,1731134543015 2024-11-09T06:42:23,399 DEBUG [RS:0;e09398052c91:34377 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,34377,1731134543015' 2024-11-09T06:42:23,399 DEBUG [RS:2;e09398052c91:44805 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,44805,1731134543074' 2024-11-09T06:42:23,399 DEBUG [RS:0;e09398052c91:34377 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T06:42:23,399 DEBUG [RS:2;e09398052c91:44805 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T06:42:23,400 DEBUG [RS:0;e09398052c91:34377 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T06:42:23,400 DEBUG [RS:2;e09398052c91:44805 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T06:42:23,400 DEBUG [RS:0;e09398052c91:34377 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T06:42:23,400 DEBUG [RS:2;e09398052c91:44805 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T06:42:23,400 DEBUG [RS:0;e09398052c91:34377 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T06:42:23,400 DEBUG [RS:2;e09398052c91:44805 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T06:42:23,400 DEBUG [RS:0;e09398052c91:34377 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e09398052c91,34377,1731134543015 2024-11-09T06:42:23,400 DEBUG [RS:2;e09398052c91:44805 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e09398052c91,44805,1731134543074 2024-11-09T06:42:23,400 DEBUG [RS:0;e09398052c91:34377 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,34377,1731134543015' 2024-11-09T06:42:23,400 DEBUG [RS:2;e09398052c91:44805 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e09398052c91,44805,1731134543074' 2024-11-09T06:42:23,400 DEBUG [RS:0;e09398052c91:34377 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T06:42:23,400 DEBUG [RS:2;e09398052c91:44805 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T06:42:23,401 DEBUG [RS:0;e09398052c91:34377 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T06:42:23,401 DEBUG [RS:2;e09398052c91:44805 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T06:42:23,401 DEBUG [RS:0;e09398052c91:34377 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T06:42:23,401 INFO [RS:0;e09398052c91:34377 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T06:42:23,401 DEBUG [RS:2;e09398052c91:44805 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T06:42:23,401 INFO [RS:0;e09398052c91:34377 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T06:42:23,401 INFO [RS:2;e09398052c91:44805 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T06:42:23,401 INFO [RS:2;e09398052c91:44805 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T06:42:23,483 INFO [RS:1;e09398052c91:46343 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C46343%2C1731134543044, suffix=, logDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,46343,1731134543044, archiveDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/oldWALs, maxLogs=32 2024-11-09T06:42:23,485 INFO [RS:1;e09398052c91:46343 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e09398052c91%2C46343%2C1731134543044.1731134543485 2024-11-09T06:42:23,487 WARN [e09398052c91:33111 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-09T06:42:23,493 INFO [RS:1;e09398052c91:46343 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,46343,1731134543044/e09398052c91%2C46343%2C1731134543044.1731134543485 2024-11-09T06:42:23,496 DEBUG [RS:1;e09398052c91:46343 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42245:42245),(127.0.0.1/127.0.0.1:36247:36247),(127.0.0.1/127.0.0.1:40087:40087)] 2024-11-09T06:42:23,504 INFO [RS:0;e09398052c91:34377 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C34377%2C1731134543015, suffix=, logDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,34377,1731134543015, archiveDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/oldWALs, maxLogs=32 2024-11-09T06:42:23,504 INFO [RS:2;e09398052c91:44805 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C44805%2C1731134543074, suffix=, logDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,44805,1731134543074, archiveDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/oldWALs, maxLogs=32 2024-11-09T06:42:23,506 INFO [RS:0;e09398052c91:34377 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e09398052c91%2C34377%2C1731134543015.1731134543506 2024-11-09T06:42:23,507 INFO [RS:2;e09398052c91:44805 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e09398052c91%2C44805%2C1731134543074.1731134543506 2024-11-09T06:42:23,520 INFO [RS:2;e09398052c91:44805 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,44805,1731134543074/e09398052c91%2C44805%2C1731134543074.1731134543506 2024-11-09T06:42:23,520 INFO [RS:0;e09398052c91:34377 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,34377,1731134543015/e09398052c91%2C34377%2C1731134543015.1731134543506 2024-11-09T06:42:23,526 DEBUG [RS:2;e09398052c91:44805 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36247:36247),(127.0.0.1/127.0.0.1:40087:40087),(127.0.0.1/127.0.0.1:42245:42245)] 2024-11-09T06:42:23,527 DEBUG [RS:0;e09398052c91:34377 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36247:36247),(127.0.0.1/127.0.0.1:42245:42245),(127.0.0.1/127.0.0.1:40087:40087)] 2024-11-09T06:42:23,737 DEBUG [e09398052c91:33111 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T06:42:23,737 DEBUG [e09398052c91:33111 {}] balancer.BalancerClusterState(204): Hosts are {e09398052c91=0} racks are {/default-rack=0} 2024-11-09T06:42:23,740 DEBUG [e09398052c91:33111 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T06:42:23,740 DEBUG [e09398052c91:33111 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T06:42:23,740 DEBUG [e09398052c91:33111 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T06:42:23,740 DEBUG [e09398052c91:33111 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T06:42:23,740 DEBUG [e09398052c91:33111 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T06:42:23,740 DEBUG [e09398052c91:33111 {}] balancer.BalancerClusterState(310): 
server 2 is on host 0 2024-11-09T06:42:23,740 INFO [e09398052c91:33111 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T06:42:23,740 INFO [e09398052c91:33111 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T06:42:23,740 INFO [e09398052c91:33111 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T06:42:23,740 DEBUG [e09398052c91:33111 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T06:42:23,741 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e09398052c91,46343,1731134543044 2024-11-09T06:42:23,742 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e09398052c91,46343,1731134543044, state=OPENING 2024-11-09T06:42:23,744 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T06:42:23,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:23,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:23,746 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T06:42:23,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:23,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:23,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:23,746 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e09398052c91,46343,1731134543044}] 2024-11-09T06:42:23,900 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T06:42:23,902 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33869, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=AdminService 2024-11-09T06:42:23,906 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T06:42:23,907 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T06:42:23,909 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e09398052c91%2C46343%2C1731134543044.meta, suffix=.meta, logDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,46343,1731134543044, archiveDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/oldWALs, maxLogs=32 2024-11-09T06:42:23,910 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e09398052c91%2C46343%2C1731134543044.meta.1731134543910.meta 2024-11-09T06:42:23,918 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/WALs/e09398052c91,46343,1731134543044/e09398052c91%2C46343%2C1731134543044.meta.1731134543910.meta 2024-11-09T06:42:23,923 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36247:36247),(127.0.0.1/127.0.0.1:42245:42245),(127.0.0.1/127.0.0.1:40087:40087)] 2024-11-09T06:42:23,924 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T06:42:23,924 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T06:42:23,924 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T06:42:23,924 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-09T06:42:23,924 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T06:42:23,924 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:23,925 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T06:42:23,925 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T06:42:23,926 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T06:42:23,928 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T06:42:23,928 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,928 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:23,928 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T06:42:23,929 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T06:42:23,929 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:23,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T06:42:23,931 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T06:42:23,931 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T06:42:23,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T06:42:23,932 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T06:42:23,933 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:23,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-09T06:42:23,933 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T06:42:23,934 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740 2024-11-09T06:42:23,935 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740 2024-11-09T06:42:23,937 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T06:42:23,937 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T06:42:23,937 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T06:42:23,939 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T06:42:23,940 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64029239, jitterRate=-0.04588998854160309}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T06:42:23,940 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T06:42:23,941 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731134543925Writing region info on filesystem at 1731134543925Initializing all the Stores at 1731134543926 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134543926Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134543926Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134543926Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731134543926Cleaning up temporary data from old regions at 1731134543937 (+11 ms)Running coprocessor post-open hooks at 1731134543940 (+3 ms)Region opened successfully at 1731134543941 (+1 ms) 2024-11-09T06:42:23,943 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731134543900 2024-11-09T06:42:23,946 DEBUG [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T06:42:23,946 INFO [RS_OPEN_META-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T06:42:23,947 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e09398052c91,46343,1731134543044 2024-11-09T06:42:23,949 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e09398052c91,46343,1731134543044, state=OPEN 2024-11-09T06:42:23,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T06:42:23,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T06:42:23,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T06:42:23,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T06:42:23,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:23,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:23,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:23,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T06:42:23,953 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=e09398052c91,46343,1731134543044 2024-11-09T06:42:23,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T06:42:23,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e09398052c91,46343,1731134543044 in 208 msec 2024-11-09T06:42:23,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T06:42:23,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 626 msec 2024-11-09T06:42:23,963 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T06:42:23,963 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T06:42:23,965 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T06:42:23,965 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e09398052c91,46343,1731134543044, seqNum=-1] 2024-11-09T06:42:23,966 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T06:42:23,968 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39425, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T06:42:23,975 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 708 msec 2024-11-09T06:42:23,976 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731134543976, completionTime=-1 2024-11-09T06:42:23,976 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T06:42:23,976 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-09T06:42:23,978 INFO [master/e09398052c91:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-09T06:42:23,978 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731134603978 2024-11-09T06:42:23,978 INFO [master/e09398052c91:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731134663978 2024-11-09T06:42:23,978 INFO [master/e09398052c91:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-09T06:42:23,979 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33111,1731134542965-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,979 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33111,1731134542965-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,979 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33111,1731134542965-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,979 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e09398052c91:33111, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,979 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,979 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,982 DEBUG [master/e09398052c91:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T06:42:23,984 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.877sec 2024-11-09T06:42:23,985 INFO [master/e09398052c91:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T06:42:23,985 INFO [master/e09398052c91:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T06:42:23,985 INFO [master/e09398052c91:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T06:42:23,985 INFO [master/e09398052c91:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-09T06:42:23,985 INFO [master/e09398052c91:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T06:42:23,985 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33111,1731134542965-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-09T06:42:23,985 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33111,1731134542965-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T06:42:23,988 DEBUG [master/e09398052c91:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T06:42:23,988 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T06:42:23,988 INFO [master/e09398052c91:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e09398052c91,33111,1731134542965-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T06:42:23,990 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e501035, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T06:42:23,990 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e09398052c91,33111,-1 for getting cluster id 2024-11-09T06:42:23,990 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T06:42:23,991 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af1edeec-714f-4394-ba78-2f6dc023dd9f' 2024-11-09T06:42:23,992 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T06:42:23,992 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af1edeec-714f-4394-ba78-2f6dc023dd9f" 2024-11-09T06:42:23,993 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c20c5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T06:42:23,993 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e09398052c91,33111,-1] 2024-11-09T06:42:23,993 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T06:42:23,994 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:23,996 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-09T06:42:23,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@722fe5c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T06:42:23,997 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T06:42:23,999 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=e09398052c91,46343,1731134543044, seqNum=-1] 2024-11-09T06:42:23,999 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T06:42:24,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T06:42:24,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e09398052c91,33111,1731134542965 2024-11-09T06:42:24,005 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T06:42:24,006 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is e09398052c91,33111,1731134542965 2024-11-09T06:42:24,006 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4d84f7ac 2024-11-09T06:42:24,006 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T06:42:24,008 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38412, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T06:42:24,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T06:42:24,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T06:42:24,013 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T06:42:24,013 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:24,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-09T06:42:24,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T06:42:24,014 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T06:42:24,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741837_1013 (size=392) 
2024-11-09T06:42:24,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741837_1013 (size=392) 2024-11-09T06:42:24,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741837_1013 (size=392) 2024-11-09T06:42:24,030 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9738fa144c6b4a77ab7012b39f6b5ad3, NAME => 'TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e 2024-11-09T06:42:24,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741838_1014 (size=51) 2024-11-09T06:42:24,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741838_1014 (size=51) 2024-11-09T06:42:24,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741838_1014 (size=51) 2024-11-09T06:42:24,045 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:24,045 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 9738fa144c6b4a77ab7012b39f6b5ad3, disabling compactions & flushes 2024-11-09T06:42:24,045 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 2024-11-09T06:42:24,045 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 2024-11-09T06:42:24,045 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. after waiting 0 ms 2024-11-09T06:42:24,045 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 2024-11-09T06:42:24,045 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 
2024-11-09T06:42:24,045 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9738fa144c6b4a77ab7012b39f6b5ad3: Waiting for close lock at 1731134544045Disabling compacts and flushes for region at 1731134544045Disabling writes for close at 1731134544045Writing region close event to WAL at 1731134544045Closed at 1731134544045 2024-11-09T06:42:24,047 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T06:42:24,047 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731134544047"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731134544047"}]},"ts":"1731134544047"} 2024-11-09T06:42:24,051 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-09T06:42:24,053 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T06:42:24,053 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731134544053"}]},"ts":"1731134544053"} 2024-11-09T06:42:24,056 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T06:42:24,057 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {e09398052c91=0} racks are {/default-rack=0} 2024-11-09T06:42:24,058 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T06:42:24,058 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T06:42:24,058 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T06:42:24,058 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T06:42:24,058 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T06:42:24,058 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T06:42:24,058 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T06:42:24,058 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T06:42:24,058 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T06:42:24,058 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T06:42:24,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9738fa144c6b4a77ab7012b39f6b5ad3, ASSIGN}] 2024-11-09T06:42:24,061 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9738fa144c6b4a77ab7012b39f6b5ad3, ASSIGN 2024-11-09T06:42:24,062 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9738fa144c6b4a77ab7012b39f6b5ad3, ASSIGN; state=OFFLINE, location=e09398052c91,46343,1731134543044; forceNewPlan=false, retain=false 2024-11-09T06:42:24,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T06:42:24,213 INFO [e09398052c91:33111 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-09T06:42:24,213 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9738fa144c6b4a77ab7012b39f6b5ad3, regionState=OPENING, regionLocation=e09398052c91,46343,1731134543044 2024-11-09T06:42:24,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9738fa144c6b4a77ab7012b39f6b5ad3, ASSIGN because future has completed 2024-11-09T06:42:24,221 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9738fa144c6b4a77ab7012b39f6b5ad3, server=e09398052c91,46343,1731134543044}] 2024-11-09T06:42:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T06:42:24,380 INFO [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 
2024-11-09T06:42:24,380 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9738fa144c6b4a77ab7012b39f6b5ad3, NAME => 'TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3.', STARTKEY => '', ENDKEY => ''} 2024-11-09T06:42:24,381 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,381 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T06:42:24,381 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,381 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,383 INFO [StoreOpener-9738fa144c6b4a77ab7012b39f6b5ad3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,384 INFO [StoreOpener-9738fa144c6b4a77ab7012b39f6b5ad3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9738fa144c6b4a77ab7012b39f6b5ad3 columnFamilyName cf 2024-11-09T06:42:24,384 DEBUG [StoreOpener-9738fa144c6b4a77ab7012b39f6b5ad3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T06:42:24,385 INFO [StoreOpener-9738fa144c6b4a77ab7012b39f6b5ad3-1 {}] regionserver.HStore(327): Store=9738fa144c6b4a77ab7012b39f6b5ad3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T06:42:24,385 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,386 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/default/TestHBaseWalOnEC/9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,386 DEBUG 
[RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/default/TestHBaseWalOnEC/9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,387 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,387 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,389 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,392 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/default/TestHBaseWalOnEC/9738fa144c6b4a77ab7012b39f6b5ad3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T06:42:24,392 INFO [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9738fa144c6b4a77ab7012b39f6b5ad3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65324655, jitterRate=-0.026586785912513733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T06:42:24,392 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,394 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9738fa144c6b4a77ab7012b39f6b5ad3: Running coprocessor pre-open hook at 1731134544381Writing region info on filesystem at 1731134544381Initializing all the Stores at 1731134544382 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731134544382Cleaning up temporary data from old regions at 1731134544387 (+5 ms)Running coprocessor post-open hooks at 1731134544392 (+5 ms)Region opened successfully at 1731134544394 (+2 ms) 2024-11-09T06:42:24,395 INFO [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3., pid=6, masterSystemTime=1731134544374 2024-11-09T06:42:24,399 DEBUG [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 2024-11-09T06:42:24,399 INFO [RS_OPEN_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 
2024-11-09T06:42:24,400 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9738fa144c6b4a77ab7012b39f6b5ad3, regionState=OPEN, openSeqNum=2, regionLocation=e09398052c91,46343,1731134543044 2024-11-09T06:42:24,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9738fa144c6b4a77ab7012b39f6b5ad3, server=e09398052c91,46343,1731134543044 because future has completed 2024-11-09T06:42:24,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T06:42:24,410 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9738fa144c6b4a77ab7012b39f6b5ad3, server=e09398052c91,46343,1731134543044 in 185 msec 2024-11-09T06:42:24,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T06:42:24,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9738fa144c6b4a77ab7012b39f6b5ad3, ASSIGN in 352 msec 2024-11-09T06:42:24,415 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T06:42:24,415 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731134544415"}]},"ts":"1731134544415"} 2024-11-09T06:42:24,418 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T06:42:24,420 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T06:42:24,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 411 msec 2024-11-09T06:42:24,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T06:42:24,638 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T06:42:24,638 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T06:42:24,638 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T06:42:24,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T06:42:24,642 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T06:42:24,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-09T06:42:24,645 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3., hostname=e09398052c91,46343,1731134543044, seqNum=2] 2024-11-09T06:42:24,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-09T06:42:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T06:42:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T06:42:24,653 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T06:42:24,654 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T06:42:24,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T06:42:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T06:42:24,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46343 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T06:42:24,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 
2024-11-09T06:42:24,809 INFO [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 9738fa144c6b4a77ab7012b39f6b5ad3 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T06:42:24,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/default/TestHBaseWalOnEC/9738fa144c6b4a77ab7012b39f6b5ad3/.tmp/cf/7c3f67ad97934c399520d6725ef9b8c9 is 36, key is row/cf:cq/1731134544647/Put/seqid=0 2024-11-09T06:42:24,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741839_1015 (size=4787) 2024-11-09T06:42:24,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741839_1015 (size=4787) 2024-11-09T06:42:24,836 INFO [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/default/TestHBaseWalOnEC/9738fa144c6b4a77ab7012b39f6b5ad3/.tmp/cf/7c3f67ad97934c399520d6725ef9b8c9 2024-11-09T06:42:24,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741839_1015 (size=4787) 2024-11-09T06:42:24,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/default/TestHBaseWalOnEC/9738fa144c6b4a77ab7012b39f6b5ad3/.tmp/cf/7c3f67ad97934c399520d6725ef9b8c9 as hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/default/TestHBaseWalOnEC/9738fa144c6b4a77ab7012b39f6b5ad3/cf/7c3f67ad97934c399520d6725ef9b8c9 2024-11-09T06:42:24,854 INFO [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/default/TestHBaseWalOnEC/9738fa144c6b4a77ab7012b39f6b5ad3/cf/7c3f67ad97934c399520d6725ef9b8c9, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T06:42:24,856 INFO [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 9738fa144c6b4a77ab7012b39f6b5ad3 in 47ms, sequenceid=5, compaction requested=false 2024-11-09T06:42:24,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 9738fa144c6b4a77ab7012b39f6b5ad3: 2024-11-09T06:42:24,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 
2024-11-09T06:42:24,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e09398052c91:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T06:42:24,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T06:42:24,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T06:42:24,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 205 msec 2024-11-09T06:42:24,867 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 215 msec 2024-11-09T06:42:24,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33111 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T06:42:24,968 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T06:42:24,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T06:42:24,972 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T06:42:24,972 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:24,972 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:24,972 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:24,972 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-09T06:42:24,972 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T06:42:24,973 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2075967471, stopped=false 2024-11-09T06:42:24,973 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e09398052c91,33111,1731134542965 2024-11-09T06:42:24,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:24,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:24,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:24,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T06:42:24,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:24,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, 
quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:24,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:24,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:24,975 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T06:42:24,975 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T06:42:24,975 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:24,975 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:24,975 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:24,975 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:24,975 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:24,976 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e09398052c91,34377,1731134543015' ***** 2024-11-09T06:42:24,976 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T06:42:24,976 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e09398052c91,46343,1731134543044' ***** 2024-11-09T06:42:24,976 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T06:42:24,976 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T06:42:24,976 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e09398052c91,44805,1731134543074' ***** 2024-11-09T06:42:24,976 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T06:42:24,976 INFO [RS:0;e09398052c91:34377 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T06:42:24,976 INFO [RS:2;e09398052c91:44805 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T06:42:24,976 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T06:42:24,976 INFO [RS:0;e09398052c91:34377 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-09T06:42:24,976 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T06:42:24,976 INFO [RS:0;e09398052c91:34377 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T06:42:24,976 INFO [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(959): stopping server e09398052c91,34377,1731134543015 2024-11-09T06:42:24,976 INFO [RS:1;e09398052c91:46343 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T06:42:24,976 INFO [RS:2;e09398052c91:44805 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T06:42:24,977 INFO [RS:0;e09398052c91:34377 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T06:42:24,977 INFO [RS:2;e09398052c91:44805 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T06:42:24,977 INFO [RS:1;e09398052c91:46343 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T06:42:24,977 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T06:42:24,977 INFO [RS:0;e09398052c91:34377 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e09398052c91:34377. 2024-11-09T06:42:24,977 INFO [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(959): stopping server e09398052c91,44805,1731134543074 2024-11-09T06:42:24,977 INFO [RS:1;e09398052c91:46343 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T06:42:24,977 INFO [RS:2;e09398052c91:44805 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T06:42:24,977 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(3091): Received CLOSE for 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,977 DEBUG [RS:0;e09398052c91:34377 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:24,977 INFO [RS:2;e09398052c91:44805 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;e09398052c91:44805. 
2024-11-09T06:42:24,977 DEBUG [RS:0;e09398052c91:34377 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:24,977 DEBUG [RS:2;e09398052c91:44805 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:24,977 DEBUG [RS:2;e09398052c91:44805 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:24,977 INFO [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(976): stopping server e09398052c91,34377,1731134543015; all regions closed. 2024-11-09T06:42:24,977 INFO [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(976): stopping server e09398052c91,44805,1731134543074; all regions closed. 2024-11-09T06:42:24,977 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(959): stopping server e09398052c91,46343,1731134543044 2024-11-09T06:42:24,977 INFO [RS:1;e09398052c91:46343 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T06:42:24,977 INFO [RS:1;e09398052c91:46343 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e09398052c91:46343. 
2024-11-09T06:42:24,978 DEBUG [RS:1;e09398052c91:46343 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T06:42:24,978 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,978 DEBUG [RS:1;e09398052c91:46343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:24,978 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,978 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9738fa144c6b4a77ab7012b39f6b5ad3, disabling compactions & flushes 2024-11-09T06:42:24,978 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,978 INFO [RS:1;e09398052c91:46343 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T06:42:24,978 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,978 INFO [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 2024-11-09T06:42:24,978 INFO [RS:1;e09398052c91:46343 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T06:42:24,978 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,978 INFO [RS:1;e09398052c91:46343 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T06:42:24,978 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 2024-11-09T06:42:24,978 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,978 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T06:42:24,978 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 
after waiting 0 ms 2024-11-09T06:42:24,978 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,978 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 2024-11-09T06:42:24,978 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,978 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,978 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-09T06:42:24,978 DEBUG [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(1325): Online Regions={9738fa144c6b4a77ab7012b39f6b5ad3=TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3., 1588230740=hbase:meta,,1.1588230740} 2024-11-09T06:42:24,978 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:24,979 DEBUG [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9738fa144c6b4a77ab7012b39f6b5ad3 2024-11-09T06:42:24,979 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T06:42:24,979 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T06:42:24,979 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T06:42:24,979 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T06:42:24,979 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T06:42:24,979 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T06:42:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741834_1010 (size=93) 2024-11-09T06:42:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741834_1010 (size=93) 2024-11-09T06:42:24,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741835_1011 (size=93) 2024-11-09T06:42:24,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741834_1010 (size=93) 2024-11-09T06:42:24,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741835_1011 (size=93) 2024-11-09T06:42:24,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741835_1011 (size=93) 2024-11-09T06:42:24,987 DEBUG [RS:0;e09398052c91:34377 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/oldWALs 2024-11-09T06:42:24,987 INFO [RS:0;e09398052c91:34377 {}] wal.AbstractFSWAL(1259): Closed WAL: 
FSHLog e09398052c91%2C34377%2C1731134543015:(num 1731134543506) 2024-11-09T06:42:24,987 DEBUG [RS:0;e09398052c91:34377 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:24,987 INFO [RS:0;e09398052c91:34377 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:24,987 INFO [RS:0;e09398052c91:34377 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T06:42:24,987 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/default/TestHBaseWalOnEC/9738fa144c6b4a77ab7012b39f6b5ad3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T06:42:24,987 INFO [RS:0;e09398052c91:34377 {}] hbase.ChoreService(370): Chore service for: regionserver/e09398052c91:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T06:42:24,988 INFO [RS:0;e09398052c91:34377 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T06:42:24,988 INFO [RS:0;e09398052c91:34377 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T06:42:24,988 INFO [RS:0;e09398052c91:34377 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T06:42:24,988 INFO [RS:0;e09398052c91:34377 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T06:42:24,988 INFO [regionserver/e09398052c91:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T06:42:24,988 INFO [RS:0;e09398052c91:34377 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34377 2024-11-09T06:42:24,988 DEBUG [RS:2;e09398052c91:44805 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/oldWALs 2024-11-09T06:42:24,989 INFO [RS:2;e09398052c91:44805 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e09398052c91%2C44805%2C1731134543074:(num 1731134543506) 2024-11-09T06:42:24,989 DEBUG [RS:2;e09398052c91:44805 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:24,989 INFO [RS:2;e09398052c91:44805 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:24,989 INFO [RS:2;e09398052c91:44805 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T06:42:24,989 INFO [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 
2024-11-09T06:42:24,989 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9738fa144c6b4a77ab7012b39f6b5ad3: Waiting for close lock at 1731134544978Running coprocessor pre-close hooks at 1731134544978Disabling compacts and flushes for region at 1731134544978Disabling writes for close at 1731134544978Writing region close event to WAL at 1731134544979 (+1 ms)Running coprocessor post-close hooks at 1731134544988 (+9 ms)Closed at 1731134544989 (+1 ms) 2024-11-09T06:42:24,989 INFO [RS:2;e09398052c91:44805 {}] hbase.ChoreService(370): Chore service for: regionserver/e09398052c91:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T06:42:24,989 DEBUG [RS_CLOSE_REGION-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3. 2024-11-09T06:42:24,989 INFO [RS:2;e09398052c91:44805 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T06:42:24,989 INFO [RS:2;e09398052c91:44805 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T06:42:24,989 INFO [regionserver/e09398052c91:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T06:42:24,989 INFO [RS:2;e09398052c91:44805 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T06:42:24,989 INFO [RS:2;e09398052c91:44805 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T06:42:24,990 INFO [RS:2;e09398052c91:44805 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44805 2024-11-09T06:42:24,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e09398052c91,34377,1731134543015 2024-11-09T06:42:24,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T06:42:24,990 INFO [RS:0;e09398052c91:34377 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T06:42:24,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e09398052c91,44805,1731134543074 2024-11-09T06:42:24,991 INFO [RS:2;e09398052c91:44805 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T06:42:24,993 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e09398052c91,34377,1731134543015] 2024-11-09T06:42:24,994 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e09398052c91,34377,1731134543015 already deleted, retry=false 2024-11-09T06:42:24,994 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e09398052c91,34377,1731134543015 expired; onlineServers=2 2024-11-09T06:42:24,994 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer 
ephemeral node deleted, processing expiration [e09398052c91,44805,1731134543074] 2024-11-09T06:42:24,995 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e09398052c91,44805,1731134543074 already deleted, retry=false 2024-11-09T06:42:24,995 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e09398052c91,44805,1731134543074 expired; onlineServers=1 2024-11-09T06:42:25,003 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/.tmp/info/cfe93adfb4e24492830fd963426a4ef7 is 153, key is TestHBaseWalOnEC,,1731134544008.9738fa144c6b4a77ab7012b39f6b5ad3./info:regioninfo/1731134544400/Put/seqid=0 2024-11-09T06:42:25,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741840_1016 (size=6637) 2024-11-09T06:42:25,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741840_1016 (size=6637) 2024-11-09T06:42:25,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741840_1016 (size=6637) 2024-11-09T06:42:25,011 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/.tmp/info/cfe93adfb4e24492830fd963426a4ef7 2024-11-09T06:42:25,035 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/.tmp/ns/662254f54d9f40f580a1cab7e807ba27 is 43, key is default/ns:d/1731134543968/Put/seqid=0 2024-11-09T06:42:25,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741841_1017 (size=5153) 2024-11-09T06:42:25,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741841_1017 (size=5153) 2024-11-09T06:42:25,043 INFO [regionserver/e09398052c91:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:25,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741841_1017 (size=5153) 2024-11-09T06:42:25,044 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/.tmp/ns/662254f54d9f40f580a1cab7e807ba27 2024-11-09T06:42:25,053 INFO [regionserver/e09398052c91:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:25,053 INFO [regionserver/e09398052c91:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:25,067 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/.tmp/table/852d759152f445b68744d86fb3aebcb6 is 52, key is TestHBaseWalOnEC/table:state/1731134544415/Put/seqid=0 2024-11-09T06:42:25,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741842_1018 (size=5249) 2024-11-09T06:42:25,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741842_1018 (size=5249) 2024-11-09T06:42:25,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741842_1018 (size=5249) 2024-11-09T06:42:25,075 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/.tmp/table/852d759152f445b68744d86fb3aebcb6 2024-11-09T06:42:25,083 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/.tmp/info/cfe93adfb4e24492830fd963426a4ef7 as hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/info/cfe93adfb4e24492830fd963426a4ef7 2024-11-09T06:42:25,089 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/info/cfe93adfb4e24492830fd963426a4ef7, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T06:42:25,090 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/.tmp/ns/662254f54d9f40f580a1cab7e807ba27 as hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/ns/662254f54d9f40f580a1cab7e807ba27 2024-11-09T06:42:25,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:25,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34377-0x100fb76bef10001, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:25,093 INFO [RS:0;e09398052c91:34377 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T06:42:25,093 INFO [RS:0;e09398052c91:34377 {}] regionserver.HRegionServer(1031): Exiting; stopping=e09398052c91,34377,1731134543015; zookeeper connection closed. 
2024-11-09T06:42:25,094 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@d54e8bf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@d54e8bf 2024-11-09T06:42:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:25,094 INFO [RS:2;e09398052c91:44805 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T06:42:25,094 INFO [RS:2;e09398052c91:44805 {}] regionserver.HRegionServer(1031): Exiting; stopping=e09398052c91,44805,1731134543074; zookeeper connection closed. 2024-11-09T06:42:25,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44805-0x100fb76bef10003, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:25,094 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7288dc55 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7288dc55 2024-11-09T06:42:25,097 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/ns/662254f54d9f40f580a1cab7e807ba27, entries=2, sequenceid=11, filesize=5.0 K 2024-11-09T06:42:25,098 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/.tmp/table/852d759152f445b68744d86fb3aebcb6 as hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/table/852d759152f445b68744d86fb3aebcb6 2024-11-09T06:42:25,105 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/table/852d759152f445b68744d86fb3aebcb6, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T06:42:25,106 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=11, compaction requested=false 2024-11-09T06:42:25,112 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T06:42:25,112 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T06:42:25,112 INFO [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T06:42:25,113 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731134544978Running coprocessor pre-close hooks 
at 1731134544978Disabling compacts and flushes for region at 1731134544978Disabling writes for close at 1731134544979 (+1 ms)Obtaining lock to block concurrent updates at 1731134544979Preparing flush snapshotting stores in 1588230740 at 1731134544979Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731134544980 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731134544981 (+1 ms)Flushing 1588230740/info: creating writer at 1731134544981Flushing 1588230740/info: appending metadata at 1731134545002 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731134545002Flushing 1588230740/ns: creating writer at 1731134545019 (+17 ms)Flushing 1588230740/ns: appending metadata at 1731134545035 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731134545035Flushing 1588230740/table: creating writer at 1731134545051 (+16 ms)Flushing 1588230740/table: appending metadata at 1731134545066 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731134545066Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@146bbe01: reopening flushed file at 1731134545082 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73c91cec: reopening flushed file at 1731134545089 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62d200a3: reopening flushed file at 1731134545097 (+8 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=11, compaction requested=false at 1731134545106 (+9 ms)Writing region close event to WAL at 1731134545107 (+1 ms)Running coprocessor post-close hooks at 1731134545112 (+5 ms)Closed at 1731134545112 2024-11-09T06:42:25,113 DEBUG [RS_CLOSE_META-regionserver/e09398052c91:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T06:42:25,124 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-09T06:42:25,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T06:42:25,179 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(976): stopping server e09398052c91,46343,1731134543044; all regions closed. 
2024-11-09T06:42:25,179 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,179 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,180 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,180 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,180 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741836_1012 (size=2751) 2024-11-09T06:42:25,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741836_1012 (size=2751) 2024-11-09T06:42:25,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741836_1012 (size=2751) 2024-11-09T06:42:25,187 DEBUG [RS:1;e09398052c91:46343 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/oldWALs 2024-11-09T06:42:25,187 INFO [RS:1;e09398052c91:46343 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e09398052c91%2C46343%2C1731134543044.meta:.meta(num 1731134543910) 2024-11-09T06:42:25,187 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,187 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,188 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,188 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,188 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741833_1009 (size=1298) 2024-11-09T06:42:25,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741833_1009 (size=1298) 2024-11-09T06:42:25,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741833_1009 (size=1298) 2024-11-09T06:42:25,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T06:42:25,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T06:42:25,194 DEBUG [RS:1;e09398052c91:46343 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/oldWALs 2024-11-09T06:42:25,194 INFO [RS:1;e09398052c91:46343 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e09398052c91%2C46343%2C1731134543044:(num 1731134543485) 2024-11-09T06:42:25,194 DEBUG [RS:1;e09398052c91:46343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T06:42:25,194 INFO [RS:1;e09398052c91:46343 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T06:42:25,194 INFO [RS:1;e09398052c91:46343 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T06:42:25,194 INFO [RS:1;e09398052c91:46343 {}] hbase.ChoreService(370): Chore service for: regionserver/e09398052c91:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T06:42:25,195 INFO [RS:1;e09398052c91:46343 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T06:42:25,195 INFO [regionserver/e09398052c91:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T06:42:25,195 INFO [RS:1;e09398052c91:46343 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46343 2024-11-09T06:42:25,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e09398052c91,46343,1731134543044 2024-11-09T06:42:25,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T06:42:25,198 INFO [RS:1;e09398052c91:46343 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T06:42:25,200 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e09398052c91,46343,1731134543044] 2024-11-09T06:42:25,201 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e09398052c91,46343,1731134543044 already deleted, retry=false 2024-11-09T06:42:25,201 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e09398052c91,46343,1731134543044 expired; onlineServers=0 2024-11-09T06:42:25,201 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e09398052c91,33111,1731134542965' ***** 2024-11-09T06:42:25,202 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T06:42:25,202 INFO [M:0;e09398052c91:33111 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T06:42:25,202 INFO [M:0;e09398052c91:33111 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T06:42:25,202 DEBUG [M:0;e09398052c91:33111 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T06:42:25,202 DEBUG [M:0;e09398052c91:33111 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T06:42:25,202 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-09T06:42:25,202 DEBUG [master/e09398052c91:0:becomeActiveMaster-HFileCleaner.large.0-1731134543272 {}] cleaner.HFileCleaner(306): Exit Thread[master/e09398052c91:0:becomeActiveMaster-HFileCleaner.large.0-1731134543272,5,FailOnTimeoutGroup] 2024-11-09T06:42:25,202 DEBUG [master/e09398052c91:0:becomeActiveMaster-HFileCleaner.small.0-1731134543273 {}] cleaner.HFileCleaner(306): Exit Thread[master/e09398052c91:0:becomeActiveMaster-HFileCleaner.small.0-1731134543273,5,FailOnTimeoutGroup] 2024-11-09T06:42:25,202 INFO [M:0;e09398052c91:33111 {}] hbase.ChoreService(370): Chore service for: master/e09398052c91:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T06:42:25,202 INFO [M:0;e09398052c91:33111 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T06:42:25,202 DEBUG [M:0;e09398052c91:33111 {}] master.HMaster(1795): Stopping service threads 2024-11-09T06:42:25,202 INFO [M:0;e09398052c91:33111 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T06:42:25,202 INFO [M:0;e09398052c91:33111 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T06:42:25,203 INFO [M:0;e09398052c91:33111 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T06:42:25,203 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-09T06:42:25,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T06:42:25,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T06:42:25,204 DEBUG [M:0;e09398052c91:33111 {}] zookeeper.ZKUtil(347): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T06:42:25,204 WARN [M:0;e09398052c91:33111 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T06:42:25,205 INFO [M:0;e09398052c91:33111 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/.lastflushedseqids 2024-11-09T06:42:25,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741843_1019 (size=127) 2024-11-09T06:42:25,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741843_1019 (size=127) 2024-11-09T06:42:25,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741843_1019 (size=127) 2024-11-09T06:42:25,220 INFO [M:0;e09398052c91:33111 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T06:42:25,220 INFO [M:0;e09398052c91:33111 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T06:42:25,220 DEBUG 
[M:0;e09398052c91:33111 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T06:42:25,220 INFO [M:0;e09398052c91:33111 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:25,220 DEBUG [M:0;e09398052c91:33111 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:25,221 DEBUG [M:0;e09398052c91:33111 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T06:42:25,221 DEBUG [M:0;e09398052c91:33111 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:25,221 INFO [M:0;e09398052c91:33111 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-09T06:42:25,239 DEBUG [M:0;e09398052c91:33111 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ce337492a2fa4c6b88e3e7cc5ceaf395 is 82, key is hbase:meta,,1/info:regioninfo/1731134543947/Put/seqid=0 2024-11-09T06:42:25,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741844_1020 (size=5672) 2024-11-09T06:42:25,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741844_1020 (size=5672) 2024-11-09T06:42:25,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741844_1020 (size=5672) 2024-11-09T06:42:25,249 INFO [M:0;e09398052c91:33111 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ce337492a2fa4c6b88e3e7cc5ceaf395 2024-11-09T06:42:25,273 DEBUG [M:0;e09398052c91:33111 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e237bac442fc4e85802bbbf8dfb5e41c is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731134544421/Put/seqid=0 2024-11-09T06:42:25,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741845_1021 (size=6440) 2024-11-09T06:42:25,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741845_1021 (size=6440) 2024-11-09T06:42:25,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741845_1021 (size=6440) 2024-11-09T06:42:25,282 INFO [M:0;e09398052c91:33111 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e237bac442fc4e85802bbbf8dfb5e41c 2024-11-09T06:42:25,300 INFO 
[RS:1;e09398052c91:46343 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T06:42:25,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:25,300 INFO [RS:1;e09398052c91:46343 {}] regionserver.HRegionServer(1031): Exiting; stopping=e09398052c91,46343,1731134543044; zookeeper connection closed. 2024-11-09T06:42:25,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46343-0x100fb76bef10002, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:25,300 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@d795c5b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@d795c5b 2024-11-09T06:42:25,301 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T06:42:25,305 DEBUG [M:0;e09398052c91:33111 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1112755253a546b48834f0eeb881fdec is 69, key is e09398052c91,34377,1731134543015/rs:state/1731134543322/Put/seqid=0 2024-11-09T06:42:25,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741846_1022 (size=5294) 2024-11-09T06:42:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741846_1022 (size=5294) 2024-11-09T06:42:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741846_1022 (size=5294) 2024-11-09T06:42:25,314 INFO [M:0;e09398052c91:33111 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1112755253a546b48834f0eeb881fdec 2024-11-09T06:42:25,322 DEBUG [M:0;e09398052c91:33111 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ce337492a2fa4c6b88e3e7cc5ceaf395 as hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ce337492a2fa4c6b88e3e7cc5ceaf395 2024-11-09T06:42:25,330 INFO [M:0;e09398052c91:33111 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ce337492a2fa4c6b88e3e7cc5ceaf395, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T06:42:25,331 DEBUG [M:0;e09398052c91:33111 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e237bac442fc4e85802bbbf8dfb5e41c as 
hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e237bac442fc4e85802bbbf8dfb5e41c 2024-11-09T06:42:25,339 INFO [M:0;e09398052c91:33111 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e237bac442fc4e85802bbbf8dfb5e41c, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T06:42:25,340 DEBUG [M:0;e09398052c91:33111 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1112755253a546b48834f0eeb881fdec as hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1112755253a546b48834f0eeb881fdec 2024-11-09T06:42:25,348 INFO [M:0;e09398052c91:33111 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42727/user/jenkins/test-data/3f1b2993-0caa-acfd-af15-446ca5415d9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1112755253a546b48834f0eeb881fdec, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T06:42:25,350 INFO [M:0;e09398052c91:33111 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=72, compaction requested=false 2024-11-09T06:42:25,352 INFO [M:0;e09398052c91:33111 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T06:42:25,352 DEBUG [M:0;e09398052c91:33111 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731134545220Disabling compacts and flushes for region at 1731134545220Disabling writes for close at 1731134545221 (+1 ms)Obtaining lock to block concurrent updates at 1731134545221Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731134545221Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731134545221Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731134545222 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731134545222Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731134545239 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731134545239Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731134545256 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731134545273 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731134545273Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731134545288 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731134545305 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731134545305Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b81841: reopening flushed file at 1731134545321 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16c4a7ac: reopening flushed file at 1731134545330 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bb27fa8: reopening flushed file at 1731134545339 (+9 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=72, compaction requested=false at 1731134545350 (+11 ms)Writing region close event to WAL at 1731134545352 (+2 ms)Closed at 1731134545352 2024-11-09T06:42:25,352 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,352 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,353 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,353 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,353 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T06:42:25,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40843 is added to blk_1073741830_1006 (size=32683) 2024-11-09T06:42:25,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41459 is added to blk_1073741830_1006 (size=32683) 2024-11-09T06:42:25,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36351 is added to blk_1073741830_1006 (size=32683) 2024-11-09T06:42:25,357 INFO [M:0;e09398052c91:33111 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-09T06:42:25,357 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T06:42:25,357 INFO [M:0;e09398052c91:33111 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33111 2024-11-09T06:42:25,358 INFO [M:0;e09398052c91:33111 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T06:42:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:25,463 INFO [M:0;e09398052c91:33111 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T06:42:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33111-0x100fb76bef10000, quorum=127.0.0.1:62482, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T06:42:25,465 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3364f2e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:25,466 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f76b201{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T06:42:25,466 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T06:42:25,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@427b8cb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T06:42:25,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b15d8c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.log.dir/,STOPPED} 2024-11-09T06:42:25,468 WARN [BP-358983239-172.17.0.2-1731134541970 heartbeating to localhost/127.0.0.1:42727 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T06:42:25,468 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T06:42:25,468 WARN [BP-358983239-172.17.0.2-1731134541970 heartbeating to localhost/127.0.0.1:42727 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-358983239-172.17.0.2-1731134541970 (Datanode Uuid 9396e4c7-a583-4715-abc7-a03a3609cda7) service to localhost/127.0.0.1:42727 2024-11-09T06:42:25,468 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T06:42:25,468 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data5/current/BP-358983239-172.17.0.2-1731134541970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:25,469 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data6/current/BP-358983239-172.17.0.2-1731134541970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:25,469 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T06:42:25,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@191f750f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:25,471 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6811044f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T06:42:25,471 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T06:42:25,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63d788f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T06:42:25,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c077530{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.log.dir/,STOPPED} 2024-11-09T06:42:25,473 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T06:42:25,473 WARN [BP-358983239-172.17.0.2-1731134541970 heartbeating to localhost/127.0.0.1:42727 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T06:42:25,473 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T06:42:25,473 WARN [BP-358983239-172.17.0.2-1731134541970 heartbeating to localhost/127.0.0.1:42727 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-358983239-172.17.0.2-1731134541970 (Datanode Uuid 3aa875f0-3d5a-4219-9d51-9a0eefe11d00) service to localhost/127.0.0.1:42727 2024-11-09T06:42:25,473 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data3/current/BP-358983239-172.17.0.2-1731134541970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:25,474 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data4/current/BP-358983239-172.17.0.2-1731134541970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:25,474 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T06:42:25,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@377873f6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T06:42:25,476 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d2295b8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T06:42:25,476 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T06:42:25,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23b354d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T06:42:25,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dae6551{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.log.dir/,STOPPED} 2024-11-09T06:42:25,478 WARN [BP-358983239-172.17.0.2-1731134541970 heartbeating to localhost/127.0.0.1:42727 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T06:42:25,478 WARN [BP-358983239-172.17.0.2-1731134541970 heartbeating to localhost/127.0.0.1:42727 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-358983239-172.17.0.2-1731134541970 (Datanode Uuid f5744ac6-c1b8-48fc-b937-aab444d5211d) service to localhost/127.0.0.1:42727 2024-11-09T06:42:25,478 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T06:42:25,478 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T06:42:25,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data1/current/BP-358983239-172.17.0.2-1731134541970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:25,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/cluster_df263a8f-ddc4-feba-dbb3-08dfbeef716d/data/data2/current/BP-358983239-172.17.0.2-1731134541970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T06:42:25,479 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T06:42:25,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3977a7da{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T06:42:25,489 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7094f416{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T06:42:25,489 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T06:42:25,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@467f22c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T06:42:25,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58cabbb9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1545262f-be67-2a37-a19b-17630f873904/hadoop.log.dir/,STOPPED} 2024-11-09T06:42:25,497 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-09T06:42:25,522 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-09T06:42:25,529 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=147 (was 88) - Thread LEAK? -, OpenFileDescriptor=519 (was 441) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=43 (was 38) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2447 (was 2633)