2024-12-06 15:54:00,804 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-06 15:54:00,818 main DEBUG Took 0.011352 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-06 15:54:00,818 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-06 15:54:00,819 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-06 15:54:00,820 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-06 15:54:00,822 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,835 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-06 15:54:00,854 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,856 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,858 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,859 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,859 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,860 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,861 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,861 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,862 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,862 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,863 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,864 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,865 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,865 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-06 15:54:00,866 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,866 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,867 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,867 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,868 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,868 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,869 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,869 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,870 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,871 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 15:54:00,871 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,872 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-06 15:54:00,874 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 15:54:00,875 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-06 15:54:00,878 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-06 15:54:00,878 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-06 15:54:00,880 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-06 15:54:00,880 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-06 15:54:00,893 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-06 15:54:00,896 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-06 15:54:00,898 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-06 15:54:00,899 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-06 15:54:00,900 main DEBUG createAppenders(={Console}) 2024-12-06 15:54:00,901 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-06 15:54:00,901 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-06 15:54:00,901 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-06 15:54:00,902 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-06 15:54:00,902 main DEBUG OutputStream closed 2024-12-06 15:54:00,903 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-06 15:54:00,903 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-06 15:54:00,904 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-06 15:54:00,994 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-06 15:54:00,996 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-06 15:54:00,998 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-06 15:54:00,999 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-06 15:54:01,000 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-06 15:54:01,000 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-06 15:54:01,001 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-06 15:54:01,001 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-06 15:54:01,002 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-06 15:54:01,002 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-06 15:54:01,003 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-06 15:54:01,003 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-06 15:54:01,004 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-06 15:54:01,004 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-06 15:54:01,005 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-06 15:54:01,005 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-06 15:54:01,005 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-06 15:54:01,006 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-06 15:54:01,010 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-06 15:54:01,010 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-06 15:54:01,011 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-06 15:54:01,012 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-06T15:54:01,034 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-06 15:54:01,038 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-06 15:54:01,039 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-06T15:54:01,302 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26 2024-12-06T15:54:01,325 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148, deleteOnExit=true 2024-12-06T15:54:01,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/test.cache.data in system properties and HBase conf 2024-12-06T15:54:01,327 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T15:54:01,327 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir in system properties and HBase conf 2024-12-06T15:54:01,328 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T15:54:01,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T15:54:01,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-06T15:54:01,427 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-06T15:54:01,522 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T15:54:01,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T15:54:01,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T15:54:01,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T15:54:01,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T15:54:01,529 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T15:54:01,529 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T15:54:01,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T15:54:01,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T15:54:01,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T15:54:01,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/nfs.dump.dir in system properties and HBase conf 2024-12-06T15:54:01,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/java.io.tmpdir in system properties and HBase conf 2024-12-06T15:54:01,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T15:54:01,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T15:54:01,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T15:54:02,392 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-06T15:54:02,483 INFO [Time-limited test {}] log.Log(170): Logging initialized @2412ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-06T15:54:02,571 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:02,636 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T15:54:02,658 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T15:54:02,658 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T15:54:02,660 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T15:54:02,673 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:02,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir/,AVAILABLE} 2024-12-06T15:54:02,677 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T15:54:02,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/java.io.tmpdir/jetty-localhost-36985-hadoop-hdfs-3_4_1-tests_jar-_-any-6627786795005419944/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T15:54:02,887 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:36985} 2024-12-06T15:54:02,887 INFO [Time-limited test {}] server.Server(415): Started @2817ms 2024-12-06T15:54:03,285 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:03,293 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T15:54:03,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T15:54:03,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T15:54:03,294 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T15:54:03,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir/,AVAILABLE} 2024-12-06T15:54:03,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T15:54:03,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/java.io.tmpdir/jetty-localhost-36457-hadoop-hdfs-3_4_1-tests_jar-_-any-13903726336824754135/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:03,419 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:36457} 2024-12-06T15:54:03,420 INFO [Time-limited test {}] server.Server(415): Started @3350ms 2024-12-06T15:54:03,475 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T15:54:03,614 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:03,622 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T15:54:03,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T15:54:03,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T15:54:03,629 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T15:54:03,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir/,AVAILABLE} 2024-12-06T15:54:03,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T15:54:03,783 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/java.io.tmpdir/jetty-localhost-38907-hadoop-hdfs-3_4_1-tests_jar-_-any-17908585465119888270/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:03,784 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:38907} 2024-12-06T15:54:03,784 INFO [Time-limited test {}] server.Server(415): Started @3714ms 2024-12-06T15:54:03,786 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T15:54:03,833 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:03,838 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T15:54:03,843 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T15:54:03,844 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T15:54:03,844 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T15:54:03,851 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir/,AVAILABLE} 2024-12-06T15:54:03,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T15:54:03,951 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data4/current/BP-1561492056-172.17.0.2-1733500442151/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:03,951 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data1/current/BP-1561492056-172.17.0.2-1733500442151/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:03,951 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data2/current/BP-1561492056-172.17.0.2-1733500442151/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:03,951 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data3/current/BP-1561492056-172.17.0.2-1733500442151/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:04,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/java.io.tmpdir/jetty-localhost-40439-hadoop-hdfs-3_4_1-tests_jar-_-any-3665385110291015144/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:04,005 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:40439} 
2024-12-06T15:54:04,005 INFO [Time-limited test {}] server.Server(415): Started @3936ms 2024-12-06T15:54:04,008 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T15:54:04,010 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T15:54:04,010 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T15:54:04,090 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c3d2cfd93b038db with lease ID 0xca14c9286e2b10de: Processing first storage report for DS-8e1cf11d-3103-4554-a1cf-6c3c58d1bf6c from datanode DatanodeRegistration(127.0.0.1:33463, datanodeUuid=70a35f20-8dd5-437c-b4a1-eb06be178832, infoPort=44199, infoSecurePort=0, ipcPort=33879, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151) 2024-12-06T15:54:04,092 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c3d2cfd93b038db with lease ID 0xca14c9286e2b10de: from storage DS-8e1cf11d-3103-4554-a1cf-6c3c58d1bf6c node DatanodeRegistration(127.0.0.1:33463, datanodeUuid=70a35f20-8dd5-437c-b4a1-eb06be178832, infoPort=44199, infoSecurePort=0, ipcPort=33879, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T15:54:04,092 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x196c385ba4180b16 with lease ID 0xca14c9286e2b10dd: Processing first storage report for DS-629ceda4-ec50-4137-b1e9-2fcee746f45c from datanode DatanodeRegistration(127.0.0.1:41117, datanodeUuid=91c46be6-ebf4-43d1-af0e-0c4c0673d55d, infoPort=35427, infoSecurePort=0, ipcPort=43247, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151) 2024-12-06T15:54:04,093 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x196c385ba4180b16 with lease ID 0xca14c9286e2b10dd: from storage DS-629ceda4-ec50-4137-b1e9-2fcee746f45c node DatanodeRegistration(127.0.0.1:41117, datanodeUuid=91c46be6-ebf4-43d1-af0e-0c4c0673d55d, infoPort=35427, infoSecurePort=0, ipcPort=43247, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:04,093 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c3d2cfd93b038db with lease ID 0xca14c9286e2b10de: Processing first storage report for DS-fd448a6d-3cf9-4e7c-923b-8ea4c74658f1 from datanode DatanodeRegistration(127.0.0.1:33463, datanodeUuid=70a35f20-8dd5-437c-b4a1-eb06be178832, infoPort=44199, infoSecurePort=0, ipcPort=33879, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151) 2024-12-06T15:54:04,093 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c3d2cfd93b038db with lease ID 0xca14c9286e2b10de: from storage DS-fd448a6d-3cf9-4e7c-923b-8ea4c74658f1 node DatanodeRegistration(127.0.0.1:33463, datanodeUuid=70a35f20-8dd5-437c-b4a1-eb06be178832, infoPort=44199, infoSecurePort=0, ipcPort=33879, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, 
invalidatedBlocks: 0 2024-12-06T15:54:04,093 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x196c385ba4180b16 with lease ID 0xca14c9286e2b10dd: Processing first storage report for DS-e028147a-b33d-4968-b510-bda0b797442b from datanode DatanodeRegistration(127.0.0.1:41117, datanodeUuid=91c46be6-ebf4-43d1-af0e-0c4c0673d55d, infoPort=35427, infoSecurePort=0, ipcPort=43247, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151) 2024-12-06T15:54:04,093 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x196c385ba4180b16 with lease ID 0xca14c9286e2b10dd: from storage DS-e028147a-b33d-4968-b510-bda0b797442b node DatanodeRegistration(127.0.0.1:41117, datanodeUuid=91c46be6-ebf4-43d1-af0e-0c4c0673d55d, infoPort=35427, infoSecurePort=0, ipcPort=43247, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:04,172 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data5/current/BP-1561492056-172.17.0.2-1733500442151/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:04,173 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data6/current/BP-1561492056-172.17.0.2-1733500442151/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:04,212 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T15:54:04,218 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38f9b3bce7d17a62 with lease ID 0xca14c9286e2b10df: Processing first storage report for DS-b2912c1d-690f-478d-98ab-c66243fe8a2b from datanode DatanodeRegistration(127.0.0.1:38889, datanodeUuid=d113bed3-53f6-4770-b39f-4a6bee037dce, infoPort=45483, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151) 2024-12-06T15:54:04,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38f9b3bce7d17a62 with lease ID 0xca14c9286e2b10df: from storage DS-b2912c1d-690f-478d-98ab-c66243fe8a2b node DatanodeRegistration(127.0.0.1:38889, datanodeUuid=d113bed3-53f6-4770-b39f-4a6bee037dce, infoPort=45483, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:04,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38f9b3bce7d17a62 with lease ID 0xca14c9286e2b10df: Processing first storage report for DS-2b0f1ef0-a90a-47b6-acd0-57a7c10fd161 from datanode DatanodeRegistration(127.0.0.1:38889, datanodeUuid=d113bed3-53f6-4770-b39f-4a6bee037dce, infoPort=45483, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151) 2024-12-06T15:54:04,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38f9b3bce7d17a62 with lease ID 0xca14c9286e2b10df: from storage DS-2b0f1ef0-a90a-47b6-acd0-57a7c10fd161 node DatanodeRegistration(127.0.0.1:38889, datanodeUuid=d113bed3-53f6-4770-b39f-4a6bee037dce, infoPort=45483, infoSecurePort=0, ipcPort=46383, storageInfo=lv=-57;cid=testClusterID;nsid=405567516;c=1733500442151), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:04,456 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26 2024-12-06T15:54:04,530 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-06T15:54:04,586 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=158, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=253, ProcessCount=11, AvailableMemoryMB=9600 2024-12-06T15:54:04,588 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T15:54:04,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-06T15:54:04,710 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/zookeeper_0, clientPort=62826, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T15:54:04,721 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62826 2024-12-06T15:54:04,736 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:04,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:04,839 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:04,839 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:04,897 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:52198 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:33463:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52198 dst: /127.0.0.1:33463 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:04,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-06T15:54:05,324 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T15:54:05,334 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972 with version=8 2024-12-06T15:54:05,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/hbase-staging 2024-12-06T15:54:05,435 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-06T15:54:05,686 INFO [Time-limited test {}] client.ConnectionUtils(128): master/85bef17d9292:0 server-side Connection retries=45 2024-12-06T15:54:05,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:05,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:05,702 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T15:54:05,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:05,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T15:54:05,848 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T15:54:05,911 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-06T15:54:05,920 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-06T15:54:05,924 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T15:54:05,952 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 12170 (auto-detected) 2024-12-06T15:54:05,953 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-06T15:54:05,973 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42661 2024-12-06T15:54:06,015 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42661 connecting to ZooKeeper ensemble=127.0.0.1:62826 2024-12-06T15:54:06,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:426610x0, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T15:54:06,050 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42661-0x100680958f00000 connected 2024-12-06T15:54:06,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:06,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:06,096 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:06,102 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972, hbase.cluster.distributed=false 2024-12-06T15:54:06,133 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T15:54:06,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42661 2024-12-06T15:54:06,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42661 2024-12-06T15:54:06,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42661 2024-12-06T15:54:06,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42661 2024-12-06T15:54:06,143 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42661 2024-12-06T15:54:06,269 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/85bef17d9292:0 server-side Connection retries=45 2024-12-06T15:54:06,272 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:06,272 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:06,272 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T15:54:06,272 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:06,272 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T15:54:06,275 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T15:54:06,278 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T15:54:06,278 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34343 2024-12-06T15:54:06,280 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34343 connecting to ZooKeeper ensemble=127.0.0.1:62826 2024-12-06T15:54:06,281 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:06,284 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:06,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343430x0, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T15:54:06,290 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34343-0x100680958f00001 connected 2024-12-06T15:54:06,290 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:06,295 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T15:54:06,306 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T15:54:06,309 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T15:54:06,316 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T15:54:06,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34343 2024-12-06T15:54:06,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=34343 2024-12-06T15:54:06,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34343 2024-12-06T15:54:06,318 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34343 2024-12-06T15:54:06,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34343 2024-12-06T15:54:06,342 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/85bef17d9292:0 server-side Connection retries=45 2024-12-06T15:54:06,342 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:06,342 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:06,343 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T15:54:06,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:06,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T15:54:06,343 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T15:54:06,344 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T15:54:06,345 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43623 2024-12-06T15:54:06,346 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43623 connecting to ZooKeeper ensemble=127.0.0.1:62826 2024-12-06T15:54:06,347 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:06,350 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:06,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:436230x0, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T15:54:06,356 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43623-0x100680958f00002 connected 2024-12-06T15:54:06,356 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:06,357 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-06T15:54:06,359 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T15:54:06,360 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T15:54:06,362 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T15:54:06,364 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43623 2024-12-06T15:54:06,364 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43623 2024-12-06T15:54:06,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43623 2024-12-06T15:54:06,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43623 2024-12-06T15:54:06,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43623 2024-12-06T15:54:06,386 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/85bef17d9292:0 server-side Connection retries=45 2024-12-06T15:54:06,386 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:06,386 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:06,386 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T15:54:06,386 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:06,387 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T15:54:06,387 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T15:54:06,387 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T15:54:06,388 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42001 2024-12-06T15:54:06,389 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42001 connecting to ZooKeeper ensemble=127.0.0.1:62826 2024-12-06T15:54:06,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:06,393 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:06,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:420010x0, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T15:54:06,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42001-0x100680958f00003 connected 2024-12-06T15:54:06,400 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:06,400 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T15:54:06,401 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T15:54:06,403 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T15:54:06,405 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T15:54:06,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42001 2024-12-06T15:54:06,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42001 2024-12-06T15:54:06,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42001 2024-12-06T15:54:06,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42001 2024-12-06T15:54:06,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42001 2024-12-06T15:54:06,427 DEBUG [M:0;85bef17d9292:42661 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;85bef17d9292:42661 2024-12-06T15:54:06,428 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/85bef17d9292,42661,1733500445489 2024-12-06T15:54:06,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:06,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:06,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
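[Annotation, not part of the captured log] The RpcExecutor / RWQueueRpcExecutor lines above reflect the standard HBase call-queue settings (handler count, read/write queue ratio, scan ratio). Below is a minimal sketch of setting those keys programmatically; the class name is illustrative and the concrete values only mirror the split logged above, they are not this test run's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CallQueueConfigSketch {
      public static Configuration rpcQueueConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);               // total RPC handler threads
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f); // handlers per call queue
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);     // split queues between reads and writes
        conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0f);       // no dedicated scan queues
        return conf;
      }
    }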
2024-12-06T15:54:06,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:06,438 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/85bef17d9292,42661,1733500445489 2024-12-06T15:54:06,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T15:54:06,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T15:54:06,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T15:54:06,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:06,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:06,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:06,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:06,461 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T15:54:06,463 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/85bef17d9292,42661,1733500445489 from backup master directory 2024-12-06T15:54:06,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/85bef17d9292,42661,1733500445489 2024-12-06T15:54:06,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:06,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-06T15:54:06,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:06,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:06,467 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T15:54:06,467 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=85bef17d9292,42661,1733500445489 2024-12-06T15:54:06,470 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-06T15:54:06,471 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-06T15:54:06,538 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/hbase.id] with ID: 7a50152d-534a-438e-85ad-c7c40b839edb 2024-12-06T15:54:06,538 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/.tmp/hbase.id 2024-12-06T15:54:06,545 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:06,545 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:06,548 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:52220 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:33463:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52220 dst: /127.0.0.1:33463 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:06,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-06T15:54:06,555 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T15:54:06,555 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/.tmp/hbase.id]:[hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/hbase.id] 2024-12-06T15:54:06,599 INFO [master/85bef17d9292:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:06,604 INFO [master/85bef17d9292:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T15:54:06,624 INFO [master/85bef17d9292:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-06T15:54:06,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:06,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:06,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:06,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:06,641 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:06,641 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:06,644 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:60896 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:41117:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60896 dst: /127.0.0.1:41117 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:06,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-06T15:54:06,650 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
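[Annotation, not part of the captured log] The recurring "Cannot allocate parity block ... policy=RS-3-2-1024k" warnings usually mean the mini-cluster has fewer datanodes than the policy needs (3 data + 2 parity = 5); the log itself suggests running 'hdfs ec -verifyClusterSetup'. A minimal, illustrative sketch of listing the enabled erasure coding policies through the HDFS client API follows; the class name is hypothetical and the namenode URI is taken from the paths logged above.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;

    public class EcPolicyCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Connect to the namenode seen in the log paths and print each policy with its state.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40849"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          for (ErasureCodingPolicyInfo info : dfs.getAllErasureCodingPolicies()) {
            System.out.println(info.getPolicy().getName() + " state=" + info.getState());
          }
        }
      }
    }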
2024-12-06T15:54:06,666 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T15:54:06,668 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T15:54:06,674 INFO [master/85bef17d9292:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T15:54:06,702 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:06,702 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:06,706 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:36208 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:38889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36208 dst: /127.0.0.1:38889 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:06,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-06T15:54:06,712 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T15:54:06,734 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store 2024-12-06T15:54:06,754 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:06,754 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:06,760 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:52234 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33463:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52234 dst: /127.0.0.1:33463 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:06,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-06T15:54:06,766 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T15:54:06,772 INFO [master/85bef17d9292:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-06T15:54:06,777 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:06,778 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T15:54:06,778 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:06,779 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:06,781 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-06T15:54:06,781 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:06,781 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:06,783 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733500446778Disabling compacts and flushes for region at 1733500446778Disabling writes for close at 1733500446781 (+3 ms)Writing region close event to WAL at 1733500446781Closed at 1733500446781 2024-12-06T15:54:06,785 WARN [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/.initializing 2024-12-06T15:54:06,785 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/WALs/85bef17d9292,42661,1733500445489 2024-12-06T15:54:06,797 INFO [master/85bef17d9292:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T15:54:06,816 INFO [master/85bef17d9292:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C42661%2C1733500445489, suffix=, logDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/WALs/85bef17d9292,42661,1733500445489, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/oldWALs, maxLogs=10 2024-12-06T15:54:06,850 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/WALs/85bef17d9292,42661,1733500445489/85bef17d9292%2C42661%2C1733500445489.1733500446823, exclude list is [], retry=0 2024-12-06T15:54:06,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:06,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41117,DS-629ceda4-ec50-4137-b1e9-2fcee746f45c,DISK] 2024-12-06T15:54:06,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38889,DS-b2912c1d-690f-478d-98ab-c66243fe8a2b,DISK] 2024-12-06T15:54:06,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33463,DS-8e1cf11d-3103-4554-a1cf-6c3c58d1bf6c,DISK] 2024-12-06T15:54:06,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-06T15:54:06,914 INFO [master/85bef17d9292:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/WALs/85bef17d9292,42661,1733500445489/85bef17d9292%2C42661%2C1733500445489.1733500446823 2024-12-06T15:54:06,915 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44199:44199),(127.0.0.1/127.0.0.1:45483:45483),(127.0.0.1/127.0.0.1:35427:35427)] 2024-12-06T15:54:06,916 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T15:54:06,916 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:06,920 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:06,921 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:06,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:06,989 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T15:54:06,993 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:06,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:06,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:06,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T15:54:06,999 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:07,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T15:54:07,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:07,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T15:54:07,003 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:07,004 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T15:54:07,004 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:07,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T15:54:07,007 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:07,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T15:54:07,008 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:07,011 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:07,013 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:07,018 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:07,018 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:07,021 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T15:54:07,025 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:07,031 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T15:54:07,032 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74945227, jitterRate=0.1167709082365036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T15:54:07,038 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733500446934Initializing all the Stores at 1733500446936 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500446936Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500446937 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500446937Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500446937Cleaning up temporary data from old regions at 1733500447018 (+81 ms)Region opened successfully at 1733500447038 (+20 ms) 2024-12-06T15:54:07,039 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T15:54:07,074 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@205cf9d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=85bef17d9292/172.17.0.2:0 2024-12-06T15:54:07,106 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T15:54:07,117 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T15:54:07,118 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T15:54:07,121 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T15:54:07,122 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-06T15:54:07,127 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-06T15:54:07,127 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T15:54:07,153 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T15:54:07,161 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T15:54:07,163 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T15:54:07,165 INFO [master/85bef17d9292:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T15:54:07,167 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T15:54:07,168 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T15:54:07,171 INFO [master/85bef17d9292:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T15:54:07,174 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T15:54:07,176 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T15:54:07,177 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T15:54:07,179 DEBUG [master/85bef17d9292:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T15:54:07,196 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T15:54:07,198 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T15:54:07,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:07,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:07,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:07,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:07,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,206 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=85bef17d9292,42661,1733500445489, sessionid=0x100680958f00000, setting cluster-up flag (Was=false) 2024-12-06T15:54:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
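[Annotation, not part of the captured log] The "Unable to get data of znode ... because node does not exist (not necessarily an error)" lines above are the master probing optional switch znodes (/hbase/balancer, /hbase/normalizer, /hbase/switch/*, /hbase/snapshot-cleanup) that are only created lazily. A minimal sketch of the same existence probe with the plain ZooKeeper client follows; the quorum address is the one logged above, everything else (class name, timeout) is illustrative.

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZNodeProbeSketch {
      public static void main(String[] args) throws Exception {
        // Connect to the quorum seen in the log; the watcher lambda simply ignores events.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62826", 30000, event -> { });
        try {
          Stat stat = zk.exists("/hbase/balancer", false); // null when the znode has not been created yet
          System.out.println("/hbase/balancer " + (stat == null ? "does not exist" : "exists"));
        } finally {
          zk.close();
        }
      }
    }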
2024-12-06T15:54:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,226 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T15:54:07,228 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=85bef17d9292,42661,1733500445489 2024-12-06T15:54:07,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:07,242 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T15:54:07,244 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=85bef17d9292,42661,1733500445489 2024-12-06T15:54:07,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-06T15:54:07,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-06T15:54:07,251 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T15:54:07,311 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(746): ClusterId : 7a50152d-534a-438e-85ad-c7c40b839edb 2024-12-06T15:54:07,311 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(746): ClusterId : 7a50152d-534a-438e-85ad-c7c40b839edb 2024-12-06T15:54:07,311 INFO [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(746): ClusterId : 7a50152d-534a-438e-85ad-c7c40b839edb 2024-12-06T15:54:07,314 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T15:54:07,314 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T15:54:07,314 DEBUG [RS:1;85bef17d9292:43623 
{}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T15:54:07,320 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T15:54:07,320 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T15:54:07,320 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T15:54:07,320 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T15:54:07,320 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T15:54:07,320 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T15:54:07,324 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T15:54:07,324 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T15:54:07,324 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T15:54:07,328 DEBUG [RS:1;85bef17d9292:43623 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fb15521, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=85bef17d9292/172.17.0.2:0 2024-12-06T15:54:07,329 DEBUG [RS:0;85bef17d9292:34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ada4e7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=85bef17d9292/172.17.0.2:0 2024-12-06T15:54:07,329 DEBUG [RS:2;85bef17d9292:42001 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17bd4c7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=85bef17d9292/172.17.0.2:0 2024-12-06T15:54:07,337 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T15:54:07,345 DEBUG [RS:0;85bef17d9292:34343 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;85bef17d9292:34343 2024-12-06T15:54:07,348 INFO [master/85bef17d9292:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T15:54:07,349 INFO [RS:0;85bef17d9292:34343 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T15:54:07,349 INFO [RS:0;85bef17d9292:34343 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T15:54:07,349 DEBUG [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-06T15:54:07,350 DEBUG [RS:1;85bef17d9292:43623 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;85bef17d9292:43623 2024-12-06T15:54:07,350 DEBUG [RS:2;85bef17d9292:42001 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;85bef17d9292:42001 2024-12-06T15:54:07,350 INFO [RS:1;85bef17d9292:43623 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T15:54:07,350 INFO [RS:2;85bef17d9292:42001 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T15:54:07,350 INFO [RS:1;85bef17d9292:43623 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T15:54:07,350 INFO [RS:2;85bef17d9292:42001 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T15:54:07,350 DEBUG [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T15:54:07,350 DEBUG [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T15:54:07,353 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(2659): reportForDuty to master=85bef17d9292,42661,1733500445489 with port=34343, startcode=1733500446227 2024-12-06T15:54:07,353 INFO [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(2659): reportForDuty to master=85bef17d9292,42661,1733500445489 with port=43623, startcode=1733500446341 2024-12-06T15:54:07,353 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(2659): reportForDuty to master=85bef17d9292,42661,1733500445489 with port=42001, startcode=1733500446385 2024-12-06T15:54:07,355 INFO [master/85bef17d9292:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-06T15:54:07,361 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 85bef17d9292,42661,1733500445489 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T15:54:07,366 DEBUG [RS:2;85bef17d9292:42001 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T15:54:07,366 DEBUG [RS:1;85bef17d9292:43623 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T15:54:07,366 DEBUG [RS:0;85bef17d9292:34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T15:54:07,368 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/85bef17d9292:0, corePoolSize=5, maxPoolSize=5 2024-12-06T15:54:07,368 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/85bef17d9292:0, corePoolSize=5, maxPoolSize=5 2024-12-06T15:54:07,368 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/85bef17d9292:0, corePoolSize=5, maxPoolSize=5 2024-12-06T15:54:07,368 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/85bef17d9292:0, corePoolSize=5, maxPoolSize=5 2024-12-06T15:54:07,368 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/85bef17d9292:0, corePoolSize=10, maxPoolSize=10 2024-12-06T15:54:07,369 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,369 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/85bef17d9292:0, corePoolSize=2, maxPoolSize=2 2024-12-06T15:54:07,369 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,375 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733500477375 2024-12-06T15:54:07,377 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T15:54:07,378 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T15:54:07,379 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T15:54:07,379 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T15:54:07,383 
INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T15:54:07,383 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T15:54:07,383 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T15:54:07,383 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T15:54:07,384 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,388 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:07,388 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T15:54:07,388 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T15:54:07,389 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T15:54:07,390 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T15:54:07,394 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T15:54:07,395 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T15:54:07,404 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.large.0-1733500447397,5,FailOnTimeoutGroup] 2024-12-06T15:54:07,408 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.small.0-1733500447404,5,FailOnTimeoutGroup] 2024-12-06T15:54:07,408 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,408 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T15:54:07,410 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,411 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57735, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T15:54:07,411 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,411 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39913, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T15:54:07,411 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:07,411 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49565, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T15:54:07,411 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
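Annotation: the two DFSStripedOutputStream warnings above come from the RS-3-2-1024k erasure coding policy, which needs five datanodes (3 data + 2 parity), while only three datanode addresses (127.0.0.1:41117, 127.0.0.1:38889, 127.0.0.1:33463) appear in this log, so the two parity blocks (index=3, index=4) cannot be placed. As the message itself suggests, the topology can be checked with the hdfs ec tool; a minimal sketch (the /hbase path is illustrative, the policy name RS-3-2-1024k is the one named in the warning):

    hdfs ec -verifyClusterSetup        # reports whether the enabled EC policies are satisfiable on this cluster
    hdfs ec -listPolicies              # lists the erasure coding policies and whether each is enabled
    hdfs ec -getPolicy -path /hbase    # shows the policy effective on a given path (illustrative path)

In this single-host test cluster the warnings are expected and the writes fall back to fewer block replicas, which is why the run continues.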
2024-12-06T15:54:07,419 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 85bef17d9292,42001,1733500446385 2024-12-06T15:54:07,422 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42661 {}] master.ServerManager(517): Registering regionserver=85bef17d9292,42001,1733500446385 2024-12-06T15:54:07,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:60938 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:41117:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60938 dst: /127.0.0.1:41117 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:07,436 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 85bef17d9292,34343,1733500446227 2024-12-06T15:54:07,436 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42661 {}] master.ServerManager(517): Registering regionserver=85bef17d9292,34343,1733500446227 2024-12-06T15:54:07,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-06T15:54:07,440 DEBUG [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972 2024-12-06T15:54:07,441 DEBUG [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40849 2024-12-06T15:54:07,441 DEBUG [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T15:54:07,442 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-06T15:54:07,443 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 85bef17d9292,43623,1733500446341 2024-12-06T15:54:07,443 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42661 {}] master.ServerManager(517): Registering regionserver=85bef17d9292,43623,1733500446341 2024-12-06T15:54:07,443 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T15:54:07,444 DEBUG [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972 2024-12-06T15:54:07,444 DEBUG [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40849 2024-12-06T15:54:07,444 DEBUG [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T15:54:07,444 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972 2024-12-06T15:54:07,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T15:54:07,447 DEBUG [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972 2024-12-06T15:54:07,447 DEBUG [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40849 
2024-12-06T15:54:07,447 DEBUG [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T15:54:07,450 DEBUG [RS:2;85bef17d9292:42001 {}] zookeeper.ZKUtil(111): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/85bef17d9292,42001,1733500446385 2024-12-06T15:54:07,450 WARN [RS:2;85bef17d9292:42001 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T15:54:07,451 INFO [RS:2;85bef17d9292:42001 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T15:54:07,451 DEBUG [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,42001,1733500446385 2024-12-06T15:54:07,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T15:54:07,452 DEBUG [RS:0;85bef17d9292:34343 {}] zookeeper.ZKUtil(111): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/85bef17d9292,34343,1733500446227 2024-12-06T15:54:07,452 WARN [RS:0;85bef17d9292:34343 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T15:54:07,452 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [85bef17d9292,42001,1733500446385] 2024-12-06T15:54:07,452 INFO [RS:0;85bef17d9292:34343 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T15:54:07,452 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [85bef17d9292,34343,1733500446227] 2024-12-06T15:54:07,452 DEBUG [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,34343,1733500446227 2024-12-06T15:54:07,452 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:07,453 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-06T15:54:07,453 DEBUG [RS:1;85bef17d9292:43623 {}] zookeeper.ZKUtil(111): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/85bef17d9292,43623,1733500446341 2024-12-06T15:54:07,454 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [85bef17d9292,43623,1733500446341] 2024-12-06T15:54:07,454 WARN [RS:1;85bef17d9292:43623 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T15:54:07,454 INFO [RS:1;85bef17d9292:43623 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T15:54:07,454 DEBUG [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,43623,1733500446341 2024-12-06T15:54:07,462 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:52272 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:33463:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52272 dst: /127.0.0.1:33463 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:07,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-06T15:54:07,470 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
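Annotation: the ZNodeClearer warnings above ("Environment variable HBASE_ZNODE_FILE not set") appear because each region server here is launched directly by the test harness rather than by the shipped hbase-daemon.sh start script, which normally exports that variable so the ephemeral znode can be cleared quickly after a crash. In this mini-cluster run the warning is harmless; on a scripted deployment the variable could be set in hbase-env.sh, roughly as sketched below (the file path is illustrative):

    # conf/hbase-env.sh (sketch)
    export HBASE_ZNODE_FILE=/var/run/hbase/regionserver.znode   # file where the RS records its ephemeral znode path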
2024-12-06T15:54:07,471 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:07,474 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T15:54:07,477 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T15:54:07,477 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:07,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:07,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T15:54:07,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T15:54:07,481 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:07,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:07,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T15:54:07,485 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T15:54:07,485 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:07,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:07,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T15:54:07,487 INFO [RS:0;85bef17d9292:34343 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T15:54:07,487 INFO [RS:1;85bef17d9292:43623 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T15:54:07,488 INFO [RS:2;85bef17d9292:42001 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T15:54:07,489 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T15:54:07,489 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:07,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:07,490 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T15:54:07,492 DEBUG [PEWorker-1 
{}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740 2024-12-06T15:54:07,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740 2024-12-06T15:54:07,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T15:54:07,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T15:54:07,496 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T15:54:07,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T15:54:07,510 INFO [RS:0;85bef17d9292:34343 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T15:54:07,510 INFO [RS:1;85bef17d9292:43623 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T15:54:07,510 INFO [RS:2;85bef17d9292:42001 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T15:54:07,522 INFO [RS:2;85bef17d9292:42001 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T15:54:07,522 INFO [RS:1;85bef17d9292:43623 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T15:54:07,522 INFO [RS:0;85bef17d9292:34343 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T15:54:07,522 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,522 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,522 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:07,529 INFO [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T15:54:07,529 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T15:54:07,530 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T15:54:07,530 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T15:54:07,531 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71166935, jitterRate=0.06046997010707855}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T15:54:07,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733500447472Initializing all the Stores at 1733500447473 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500447474 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500447474Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500447474Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500447474Cleaning up temporary data from old regions at 1733500447495 (+21 ms)Region opened successfully at 1733500447533 (+38 ms) 2024-12-06T15:54:07,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T15:54:07,533 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T15:54:07,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T15:54:07,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T15:54:07,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T15:54:07,537 INFO 
[PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T15:54:07,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733500447533Disabling compacts and flushes for region at 1733500447533Disabling writes for close at 1733500447534 (+1 ms)Writing region close event to WAL at 1733500447536 (+2 ms)Closed at 1733500447537 (+1 ms) 2024-12-06T15:54:07,539 INFO [RS:1;85bef17d9292:43623 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T15:54:07,539 INFO [RS:2;85bef17d9292:42001 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T15:54:07,539 INFO [RS:0;85bef17d9292:34343 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T15:54:07,541 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,541 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,541 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,541 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,541 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,541 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,541 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T15:54:07,542 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T15:54:07,542 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/85bef17d9292:0, corePoolSize=2, maxPoolSize=2 2024-12-06T15:54:07,542 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service 
name=RS_COMPACTED_FILES_DISCHARGER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/85bef17d9292:0, corePoolSize=2, maxPoolSize=2 2024-12-06T15:54:07,542 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,542 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,543 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,543 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,543 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,543 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,543 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:07,543 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,543 DEBUG [RS:2;85bef17d9292:42001 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:07,543 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,543 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:07,543 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:07,544 DEBUG [RS:1;85bef17d9292:43623 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:07,544 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,544 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,544 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,544 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,544 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,544 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/85bef17d9292:0, corePoolSize=2, maxPoolSize=2 2024-12-06T15:54:07,545 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,545 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,545 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,545 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,545 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,545 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:07,546 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:07,546 DEBUG [RS:0;85bef17d9292:34343 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:07,547 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,547 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,547 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:07,547 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,547 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,547 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,43623,1733500446341-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T15:54:07,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T15:54:07,552 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,552 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,552 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,552 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,552 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,552 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,34343,1733500446227-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T15:54:07,560 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,561 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,561 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,561 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,561 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,561 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,42001,1733500446385-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T15:54:07,562 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T15:54:07,569 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T15:54:07,583 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T15:54:07,583 INFO [RS:1;85bef17d9292:43623 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T15:54:07,586 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,43623,1733500446341-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,586 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,34343,1733500446227-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,586 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,586 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,586 INFO [RS:0;85bef17d9292:34343 {}] regionserver.Replication(171): 85bef17d9292,34343,1733500446227 started 2024-12-06T15:54:07,586 INFO [RS:1;85bef17d9292:43623 {}] regionserver.Replication(171): 85bef17d9292,43623,1733500446341 started 2024-12-06T15:54:07,591 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T15:54:07,591 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,42001,1733500446385-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,591 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,592 INFO [RS:2;85bef17d9292:42001 {}] regionserver.Replication(171): 85bef17d9292,42001,1733500446385 started 2024-12-06T15:54:07,609 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:07,610 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1482): Serving as 85bef17d9292,42001,1733500446385, RpcServer on 85bef17d9292/172.17.0.2:42001, sessionid=0x100680958f00003 2024-12-06T15:54:07,611 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T15:54:07,611 DEBUG [RS:2;85bef17d9292:42001 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 85bef17d9292,42001,1733500446385 2024-12-06T15:54:07,611 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,42001,1733500446385' 2024-12-06T15:54:07,611 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T15:54:07,612 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,612 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:07,612 INFO [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(1482): Serving as 85bef17d9292,43623,1733500446341, RpcServer on 85bef17d9292/172.17.0.2:43623, sessionid=0x100680958f00002 2024-12-06T15:54:07,612 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(1482): Serving as 85bef17d9292,34343,1733500446227, RpcServer on 85bef17d9292/172.17.0.2:34343, sessionid=0x100680958f00001 2024-12-06T15:54:07,612 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T15:54:07,612 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T15:54:07,612 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T15:54:07,613 DEBUG [RS:0;85bef17d9292:34343 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 85bef17d9292,34343,1733500446227 2024-12-06T15:54:07,613 DEBUG [RS:1;85bef17d9292:43623 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 85bef17d9292,43623,1733500446341 2024-12-06T15:54:07,613 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,34343,1733500446227' 2024-12-06T15:54:07,613 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,43623,1733500446341' 2024-12-06T15:54:07,613 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T15:54:07,613 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T15:54:07,613 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T15:54:07,613 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T15:54:07,613 DEBUG [RS:2;85bef17d9292:42001 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
85bef17d9292,42001,1733500446385 2024-12-06T15:54:07,613 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,42001,1733500446385' 2024-12-06T15:54:07,613 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T15:54:07,613 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T15:54:07,614 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T15:54:07,614 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T15:54:07,614 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T15:54:07,614 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T15:54:07,615 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T15:54:07,615 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T15:54:07,615 DEBUG [RS:1;85bef17d9292:43623 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 85bef17d9292,43623,1733500446341 2024-12-06T15:54:07,615 DEBUG [RS:0;85bef17d9292:34343 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 85bef17d9292,34343,1733500446227 2024-12-06T15:54:07,615 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,43623,1733500446341' 2024-12-06T15:54:07,615 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,34343,1733500446227' 2024-12-06T15:54:07,615 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T15:54:07,615 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T15:54:07,615 DEBUG [RS:2;85bef17d9292:42001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T15:54:07,615 INFO [RS:2;85bef17d9292:42001 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T15:54:07,615 INFO [RS:2;85bef17d9292:42001 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-06T15:54:07,616 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T15:54:07,616 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T15:54:07,616 DEBUG [RS:1;85bef17d9292:43623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T15:54:07,616 INFO [RS:1;85bef17d9292:43623 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T15:54:07,616 INFO [RS:1;85bef17d9292:43623 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T15:54:07,617 DEBUG [RS:0;85bef17d9292:34343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T15:54:07,617 INFO [RS:0;85bef17d9292:34343 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T15:54:07,617 INFO [RS:0;85bef17d9292:34343 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T15:54:07,720 WARN [85bef17d9292:42661 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-06T15:54:07,721 INFO [RS:1;85bef17d9292:43623 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T15:54:07,721 INFO [RS:0;85bef17d9292:34343 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T15:54:07,721 INFO [RS:2;85bef17d9292:42001 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T15:54:07,725 INFO [RS:1;85bef17d9292:43623 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C43623%2C1733500446341, suffix=, logDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,43623,1733500446341, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/oldWALs, maxLogs=32 2024-12-06T15:54:07,725 INFO [RS:2;85bef17d9292:42001 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C42001%2C1733500446385, suffix=, logDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,42001,1733500446385, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/oldWALs, maxLogs=32 2024-12-06T15:54:07,726 INFO [RS:0;85bef17d9292:34343 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C34343%2C1733500446227, suffix=, logDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,34343,1733500446227, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/oldWALs, maxLogs=32 2024-12-06T15:54:07,746 DEBUG [RS:2;85bef17d9292:42001 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,42001,1733500446385/85bef17d9292%2C42001%2C1733500446385.1733500447730, exclude list is [], retry=0 2024-12-06T15:54:07,750 DEBUG [RS:1;85bef17d9292:43623 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,43623,1733500446341/85bef17d9292%2C43623%2C1733500446341.1733500447730, exclude list is [], retry=0 2024-12-06T15:54:07,750 DEBUG [RS:0;85bef17d9292:34343 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,34343,1733500446227/85bef17d9292%2C34343%2C1733500446227.1733500447730, exclude list is [], retry=0 2024-12-06T15:54:07,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41117,DS-629ceda4-ec50-4137-b1e9-2fcee746f45c,DISK] 2024-12-06T15:54:07,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38889,DS-b2912c1d-690f-478d-98ab-c66243fe8a2b,DISK] 2024-12-06T15:54:07,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33463,DS-8e1cf11d-3103-4554-a1cf-6c3c58d1bf6c,DISK] 2024-12-06T15:54:07,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41117,DS-629ceda4-ec50-4137-b1e9-2fcee746f45c,DISK] 2024-12-06T15:54:07,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41117,DS-629ceda4-ec50-4137-b1e9-2fcee746f45c,DISK] 2024-12-06T15:54:07,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38889,DS-b2912c1d-690f-478d-98ab-c66243fe8a2b,DISK] 2024-12-06T15:54:07,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33463,DS-8e1cf11d-3103-4554-a1cf-6c3c58d1bf6c,DISK] 2024-12-06T15:54:07,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33463,DS-8e1cf11d-3103-4554-a1cf-6c3c58d1bf6c,DISK] 2024-12-06T15:54:07,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38889,DS-b2912c1d-690f-478d-98ab-c66243fe8a2b,DISK] 2024-12-06T15:54:07,806 INFO [RS:2;85bef17d9292:42001 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,42001,1733500446385/85bef17d9292%2C42001%2C1733500446385.1733500447730 2024-12-06T15:54:07,807 DEBUG [RS:2;85bef17d9292:42001 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35427:35427),(127.0.0.1/127.0.0.1:45483:45483),(127.0.0.1/127.0.0.1:44199:44199)] 2024-12-06T15:54:07,808 INFO [RS:0;85bef17d9292:34343 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,34343,1733500446227/85bef17d9292%2C34343%2C1733500446227.1733500447730 2024-12-06T15:54:07,809 INFO [RS:1;85bef17d9292:43623 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,43623,1733500446341/85bef17d9292%2C43623%2C1733500446341.1733500447730 2024-12-06T15:54:07,809 DEBUG [RS:0;85bef17d9292:34343 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35427:35427),(127.0.0.1/127.0.0.1:44199:44199),(127.0.0.1/127.0.0.1:45483:45483)] 2024-12-06T15:54:07,809 DEBUG [RS:1;85bef17d9292:43623 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35427:35427),(127.0.0.1/127.0.0.1:44199:44199),(127.0.0.1/127.0.0.1:45483:45483)] 2024-12-06T15:54:07,973 DEBUG [85bef17d9292:42661 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-06T15:54:07,981 DEBUG [85bef17d9292:42661 {}] balancer.BalancerClusterState(204): Hosts are {85bef17d9292=0} racks are {/default-rack=0} 2024-12-06T15:54:07,988 DEBUG [85bef17d9292:42661 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-06T15:54:07,988 DEBUG [85bef17d9292:42661 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-06T15:54:07,988 DEBUG [85bef17d9292:42661 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-06T15:54:07,988 DEBUG [85bef17d9292:42661 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-06T15:54:07,988 DEBUG [85bef17d9292:42661 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-06T15:54:07,988 DEBUG [85bef17d9292:42661 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-06T15:54:07,988 INFO [85bef17d9292:42661 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-06T15:54:07,988 INFO [85bef17d9292:42661 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-06T15:54:07,988 INFO [85bef17d9292:42661 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-06T15:54:07,988 DEBUG [85bef17d9292:42661 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-06T15:54:07,996 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=85bef17d9292,42001,1733500446385 2024-12-06T15:54:08,002 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 85bef17d9292,42001,1733500446385, state=OPENING 2024-12-06T15:54:08,008 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T15:54:08,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-06T15:54:08,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:08,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:08,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:08,011 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:08,011 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:08,011 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:08,011 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:08,012 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T15:54:08,014 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=85bef17d9292,42001,1733500446385}] 2024-12-06T15:54:08,191 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T15:54:08,193 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52949, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T15:54:08,207 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T15:54:08,208 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T15:54:08,209 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-06T15:54:08,212 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C42001%2C1733500446385.meta, suffix=.meta, logDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,42001,1733500446385, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/oldWALs, maxLogs=32 2024-12-06T15:54:08,228 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create 
output stream for /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,42001,1733500446385/85bef17d9292%2C42001%2C1733500446385.meta.1733500448214.meta, exclude list is [], retry=0 2024-12-06T15:54:08,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33463,DS-8e1cf11d-3103-4554-a1cf-6c3c58d1bf6c,DISK] 2024-12-06T15:54:08,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38889,DS-b2912c1d-690f-478d-98ab-c66243fe8a2b,DISK] 2024-12-06T15:54:08,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41117,DS-629ceda4-ec50-4137-b1e9-2fcee746f45c,DISK] 2024-12-06T15:54:08,238 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/WALs/85bef17d9292,42001,1733500446385/85bef17d9292%2C42001%2C1733500446385.meta.1733500448214.meta 2024-12-06T15:54:08,238 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44199:44199),(127.0.0.1/127.0.0.1:45483:45483),(127.0.0.1/127.0.0.1:35427:35427)] 2024-12-06T15:54:08,238 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T15:54:08,240 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T15:54:08,243 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T15:54:08,248 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-06T15:54:08,252 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T15:54:08,252 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:08,253 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T15:54:08,253 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T15:54:08,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T15:54:08,258 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T15:54:08,258 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:08,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:08,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T15:54:08,261 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T15:54:08,261 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:08,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:08,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T15:54:08,264 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T15:54:08,264 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:08,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:08,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T15:54:08,267 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T15:54:08,267 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:08,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-06T15:54:08,294 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T15:54:08,301 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740 2024-12-06T15:54:08,305 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740 2024-12-06T15:54:08,307 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T15:54:08,308 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T15:54:08,309 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T15:54:08,312 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T15:54:08,314 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72417777, jitterRate=0.07910896837711334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T15:54:08,314 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T15:54:08,316 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733500448253Writing region info on filesystem at 1733500448253Initializing all the Stores at 1733500448255 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500448256 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500448256Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500448256Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500448256Cleaning up temporary data from old regions at 1733500448308 (+52 ms)Running coprocessor post-open hooks at 1733500448314 (+6 ms)Region opened successfully at 1733500448316 (+2 ms) 2024-12-06T15:54:08,330 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733500448181 2024-12-06T15:54:08,344 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T15:54:08,344 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T15:54:08,346 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=85bef17d9292,42001,1733500446385 2024-12-06T15:54:08,348 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 85bef17d9292,42001,1733500446385, state=OPEN 2024-12-06T15:54:08,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T15:54:08,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T15:54:08,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T15:54:08,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T15:54:08,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:08,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:08,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:08,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:08,352 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=85bef17d9292,42001,1733500446385 2024-12-06T15:54:08,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T15:54:08,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=85bef17d9292,42001,1733500446385 in 338 msec 2024-12-06T15:54:08,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T15:54:08,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 809 msec 2024-12-06T15:54:08,366 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T15:54:08,366 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T15:54:08,387 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T15:54:08,388 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=85bef17d9292,42001,1733500446385, seqNum=-1] 2024-12-06T15:54:08,408 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T15:54:08,411 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55163, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T15:54:08,431 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1510 sec 2024-12-06T15:54:08,431 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733500448431, completionTime=-1 2024-12-06T15:54:08,434 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-06T15:54:08,434 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-06T15:54:08,461 INFO [master/85bef17d9292:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-06T15:54:08,461 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733500508461 2024-12-06T15:54:08,461 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733500568461 2024-12-06T15:54:08,461 INFO [master/85bef17d9292:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-06T15:54:08,463 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-06T15:54:08,469 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,42661,1733500445489-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:08,469 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,42661,1733500445489-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:08,469 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,42661,1733500445489-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:08,471 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-85bef17d9292:42661, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:08,471 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:08,472 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:08,479 DEBUG [master/85bef17d9292:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T15:54:08,499 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.031sec 2024-12-06T15:54:08,501 INFO [master/85bef17d9292:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T15:54:08,502 INFO [master/85bef17d9292:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T15:54:08,503 INFO [master/85bef17d9292:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T15:54:08,503 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-06T15:54:08,503 INFO [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T15:54:08,504 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,42661,1733500445489-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T15:54:08,504 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,42661,1733500445489-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T15:54:08,509 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T15:54:08,510 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T15:54:08,510 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,42661,1733500445489-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:08,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@418871fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T15:54:08,526 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-06T15:54:08,526 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-06T15:54:08,530 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 85bef17d9292,42661,-1 for getting cluster id 2024-12-06T15:54:08,533 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T15:54:08,541 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7a50152d-534a-438e-85ad-c7c40b839edb' 2024-12-06T15:54:08,544 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T15:54:08,544 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7a50152d-534a-438e-85ad-c7c40b839edb" 2024-12-06T15:54:08,546 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@da3b791, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T15:54:08,546 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [85bef17d9292,42661,-1] 2024-12-06T15:54:08,549 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T15:54:08,550 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:08,551 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43922, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-06T15:54:08,554 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f46cc90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T15:54:08,555 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T15:54:08,563 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=85bef17d9292,42001,1733500446385, seqNum=-1] 2024-12-06T15:54:08,563 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T15:54:08,565 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57186, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T15:54:08,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=85bef17d9292,42661,1733500445489 2024-12-06T15:54:08,589 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-06T15:54:08,594 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 85bef17d9292,42661,1733500445489 2024-12-06T15:54:08,596 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@44636b17 2024-12-06T15:54:08,597 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T15:54:08,599 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T15:54:08,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T15:54:08,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-06T15:54:08,617 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T15:54:08,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-06T15:54:08,619 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:08,621 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T15:54:08,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T15:54:08,629 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:08,629 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:08,635 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:36274 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:38889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36274 dst: /127.0.0.1:38889
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T15:54:08,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-06T15:54:08,642 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-06T15:54:08,645 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0012bf01c3f0ccf92eb4034db397cd47, NAME => 'TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972 2024-12-06T15:54:08,650 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:08,651 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:08,657 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:60988 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41117:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60988 dst: /127.0.0.1:41117
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T15:54:08,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-06T15:54:08,665 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-06T15:54:08,665 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:08,666 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 0012bf01c3f0ccf92eb4034db397cd47, disabling compactions & flushes 2024-12-06T15:54:08,666 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:08,666 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:08,666 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. after waiting 0 ms 2024-12-06T15:54:08,666 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:08,666 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:08,666 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0012bf01c3f0ccf92eb4034db397cd47: Waiting for close lock at 1733500448665Disabling compacts and flushes for region at 1733500448665Disabling writes for close at 1733500448666 (+1 ms)Writing region close event to WAL at 1733500448666Closed at 1733500448666 2024-12-06T15:54:08,668 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T15:54:08,673 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733500448668"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733500448668"}]},"ts":"1733500448668"} 2024-12-06T15:54:08,678 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-06T15:54:08,680 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T15:54:08,683 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733500448680"}]},"ts":"1733500448680"} 2024-12-06T15:54:08,687 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-06T15:54:08,687 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {85bef17d9292=0} racks are {/default-rack=0} 2024-12-06T15:54:08,689 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-06T15:54:08,689 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-06T15:54:08,689 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-06T15:54:08,689 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-06T15:54:08,689 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-06T15:54:08,689 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-06T15:54:08,689 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-06T15:54:08,689 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-06T15:54:08,689 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-06T15:54:08,689 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-06T15:54:08,690 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0012bf01c3f0ccf92eb4034db397cd47, ASSIGN}] 2024-12-06T15:54:08,692 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0012bf01c3f0ccf92eb4034db397cd47, ASSIGN 2024-12-06T15:54:08,694 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0012bf01c3f0ccf92eb4034db397cd47, ASSIGN; state=OFFLINE, location=85bef17d9292,34343,1733500446227; forceNewPlan=false, retain=false 2024-12-06T15:54:08,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T15:54:08,847 INFO [85bef17d9292:42661 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-06T15:54:08,847 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0012bf01c3f0ccf92eb4034db397cd47, regionState=OPENING, regionLocation=85bef17d9292,34343,1733500446227 2024-12-06T15:54:08,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0012bf01c3f0ccf92eb4034db397cd47, ASSIGN because future has completed 2024-12-06T15:54:08,858 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0012bf01c3f0ccf92eb4034db397cd47, server=85bef17d9292,34343,1733500446227}] 2024-12-06T15:54:08,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T15:54:09,013 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T15:54:09,015 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60165, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T15:54:09,021 INFO [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:09,022 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0012bf01c3f0ccf92eb4034db397cd47, NAME => 'TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47.', STARTKEY => '', ENDKEY => ''} 2024-12-06T15:54:09,022 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,022 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:09,022 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,022 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,025 INFO [StoreOpener-0012bf01c3f0ccf92eb4034db397cd47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,027 INFO [StoreOpener-0012bf01c3f0ccf92eb4034db397cd47-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0012bf01c3f0ccf92eb4034db397cd47 columnFamilyName cf 2024-12-06T15:54:09,027 DEBUG [StoreOpener-0012bf01c3f0ccf92eb4034db397cd47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:09,029 INFO [StoreOpener-0012bf01c3f0ccf92eb4034db397cd47-1 {}] regionserver.HStore(327): Store=0012bf01c3f0ccf92eb4034db397cd47/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T15:54:09,029 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,030 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/default/TestHBaseWalOnEC/0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,030 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/default/TestHBaseWalOnEC/0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,031 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,031 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,034 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,039 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/default/TestHBaseWalOnEC/0012bf01c3f0ccf92eb4034db397cd47/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T15:54:09,040 INFO [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0012bf01c3f0ccf92eb4034db397cd47; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74010232, jitterRate=0.10283839702606201}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T15:54:09,040 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,042 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0012bf01c3f0ccf92eb4034db397cd47: Running coprocessor pre-open hook at 1733500449022Writing region info on filesystem at 1733500449022Initializing all the Stores at 1733500449024 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500449024Cleaning up temporary data from old regions at 1733500449031 (+7 ms)Running coprocessor post-open hooks at 1733500449040 (+9 ms)Region opened successfully at 1733500449041 (+1 ms) 2024-12-06T15:54:09,044 INFO [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47., pid=6, masterSystemTime=1733500449012 2024-12-06T15:54:09,048 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:09,048 INFO [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:09,050 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0012bf01c3f0ccf92eb4034db397cd47, regionState=OPEN, openSeqNum=2, regionLocation=85bef17d9292,34343,1733500446227 2024-12-06T15:54:09,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0012bf01c3f0ccf92eb4034db397cd47, server=85bef17d9292,34343,1733500446227 because future has completed 2024-12-06T15:54:09,069 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T15:54:09,069 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0012bf01c3f0ccf92eb4034db397cd47, server=85bef17d9292,34343,1733500446227 in 205 msec 2024-12-06T15:54:09,074 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T15:54:09,074 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0012bf01c3f0ccf92eb4034db397cd47, ASSIGN in 379 msec 2024-12-06T15:54:09,076 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T15:54:09,076 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733500449076"}]},"ts":"1733500449076"} 2024-12-06T15:54:09,080 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-06T15:54:09,082 INFO [PEWorker-5 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T15:54:09,085 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 474 msec 2024-12-06T15:54:09,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T15:54:09,260 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-06T15:54:09,260 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-06T15:54:09,262 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T15:54:09,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-06T15:54:09,268 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T15:54:09,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-06T15:54:09,280 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47., hostname=85bef17d9292,34343,1733500446227, seqNum=2] 2024-12-06T15:54:09,282 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T15:54:09,284 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T15:54:09,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-06T15:54:09,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-06T15:54:09,300 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-06T15:54:09,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T15:54:09,301 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T15:54:09,303 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T15:54:09,409 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T15:54:09,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34343 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-06T15:54:09,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:09,470 INFO [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 0012bf01c3f0ccf92eb4034db397cd47 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-06T15:54:09,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/default/TestHBaseWalOnEC/0012bf01c3f0ccf92eb4034db397cd47/.tmp/cf/317f12bdaa15435688a542713a5c3fff is 36, key is row/cf:cq/1733500449285/Put/seqid=0 2024-12-06T15:54:09,532 WARN [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:09,532 WARN [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:09,536 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_871789061_22 at /127.0.0.1:52328 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33463:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52328 dst: /127.0.0.1:33463 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:09,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-06T15:54:09,542 WARN [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T15:54:09,543 INFO [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/default/TestHBaseWalOnEC/0012bf01c3f0ccf92eb4034db397cd47/.tmp/cf/317f12bdaa15435688a542713a5c3fff 2024-12-06T15:54:09,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/default/TestHBaseWalOnEC/0012bf01c3f0ccf92eb4034db397cd47/.tmp/cf/317f12bdaa15435688a542713a5c3fff as hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/default/TestHBaseWalOnEC/0012bf01c3f0ccf92eb4034db397cd47/cf/317f12bdaa15435688a542713a5c3fff 2024-12-06T15:54:09,610 INFO [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/default/TestHBaseWalOnEC/0012bf01c3f0ccf92eb4034db397cd47/cf/317f12bdaa15435688a542713a5c3fff, entries=1, sequenceid=5, filesize=4.7 K 2024-12-06T15:54:09,619 INFO [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 0012bf01c3f0ccf92eb4034db397cd47 in 147ms, sequenceid=5, compaction requested=false 2024-12-06T15:54:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T15:54:09,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-06T15:54:09,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 0012bf01c3f0ccf92eb4034db397cd47: 2024-12-06T15:54:09,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 
2024-12-06T15:54:09,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-06T15:54:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-06T15:54:09,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-06T15:54:09,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 328 msec 2024-12-06T15:54:09,640 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 343 msec 2024-12-06T15:54:09,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T15:54:09,929 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-06T15:54:09,944 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T15:54:09,944 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T15:54:09,944 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:09,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:09,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:09,949 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-06T15:54:09,950 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T15:54:09,950 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1025016125, stopped=false 2024-12-06T15:54:09,950 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=85bef17d9292,42661,1733500445489 2024-12-06T15:54:09,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:09,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:09,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:09,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:09,953 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T15:54:09,953 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-06T15:54:09,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:09,953 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:09,953 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:09,953 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:09,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:09,954 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '85bef17d9292,34343,1733500446227' ***** 2024-12-06T15:54:09,954 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T15:54:09,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:09,954 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '85bef17d9292,43623,1733500446341' ***** 2024-12-06T15:54:09,954 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T15:54:09,954 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '85bef17d9292,42001,1733500446385' ***** 2024-12-06T15:54:09,954 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T15:54:09,954 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T15:54:09,954 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:09,954 INFO [RS:0;85bef17d9292:34343 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T15:54:09,955 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T15:54:09,955 INFO [RS:0;85bef17d9292:34343 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-06T15:54:09,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:09,955 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(3091): Received CLOSE for 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,955 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T15:54:09,955 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:09,956 INFO [RS:2;85bef17d9292:42001 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T15:54:09,956 INFO [RS:2;85bef17d9292:42001 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T15:54:09,956 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(959): stopping server 85bef17d9292,42001,1733500446385 2024-12-06T15:54:09,956 INFO [RS:2;85bef17d9292:42001 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T15:54:09,956 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:09,956 INFO [RS:2;85bef17d9292:42001 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;85bef17d9292:42001. 2024-12-06T15:54:09,956 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T15:54:09,956 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(959): stopping server 85bef17d9292,34343,1733500446227 2024-12-06T15:54:09,956 INFO [RS:0;85bef17d9292:34343 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T15:54:09,957 INFO [RS:0;85bef17d9292:34343 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;85bef17d9292:34343. 
2024-12-06T15:54:09,957 DEBUG [RS:2;85bef17d9292:42001 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:09,957 DEBUG [RS:2;85bef17d9292:42001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:09,957 DEBUG [RS:0;85bef17d9292:34343 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:09,957 INFO [RS:2;85bef17d9292:42001 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T15:54:09,957 INFO [RS:2;85bef17d9292:42001 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T15:54:09,957 INFO [RS:2;85bef17d9292:42001 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-06T15:54:09,957 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T15:54:09,957 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0012bf01c3f0ccf92eb4034db397cd47, disabling compactions & flushes 2024-12-06T15:54:09,957 DEBUG [RS:0;85bef17d9292:34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:09,957 INFO [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:09,957 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:09,957 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. after waiting 0 ms 2024-12-06T15:54:09,957 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-06T15:54:09,958 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-06T15:54:09,958 DEBUG [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(1325): Online Regions={0012bf01c3f0ccf92eb4034db397cd47=TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47.} 2024-12-06T15:54:09,958 DEBUG [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-06T15:54:09,958 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 
2024-12-06T15:54:09,958 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T15:54:09,958 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T15:54:09,958 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T15:54:09,958 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T15:54:09,958 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T15:54:09,958 DEBUG [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-06T15:54:09,958 DEBUG [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(1351): Waiting on 0012bf01c3f0ccf92eb4034db397cd47 2024-12-06T15:54:09,958 INFO [RS:1;85bef17d9292:43623 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T15:54:09,958 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-06T15:54:09,958 INFO [RS:1;85bef17d9292:43623 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T15:54:09,958 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T15:54:09,958 INFO [RS:1;85bef17d9292:43623 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T15:54:09,958 INFO [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(959): stopping server 85bef17d9292,43623,1733500446341 2024-12-06T15:54:09,959 INFO [RS:1;85bef17d9292:43623 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T15:54:09,959 INFO [RS:1;85bef17d9292:43623 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;85bef17d9292:43623. 
2024-12-06T15:54:09,959 DEBUG [RS:1;85bef17d9292:43623 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:09,959 DEBUG [RS:1;85bef17d9292:43623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:09,959 INFO [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(976): stopping server 85bef17d9292,43623,1733500446341; all regions closed. 2024-12-06T15:54:09,959 INFO [regionserver/85bef17d9292:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:09,960 INFO [regionserver/85bef17d9292:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:09,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_1073741827_1017 (size=93) 2024-12-06T15:54:09,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_1073741827_1017 (size=93) 2024-12-06T15:54:09,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741827_1017 (size=93) 2024-12-06T15:54:09,980 DEBUG [RS:1;85bef17d9292:43623 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/oldWALs 2024-12-06T15:54:09,980 INFO [RS:1;85bef17d9292:43623 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 85bef17d9292%2C43623%2C1733500446341:(num 1733500447730) 2024-12-06T15:54:09,981 DEBUG [RS:1;85bef17d9292:43623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:09,981 INFO [RS:1;85bef17d9292:43623 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:09,981 INFO [RS:1;85bef17d9292:43623 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T15:54:09,981 INFO [RS:1;85bef17d9292:43623 {}] hbase.ChoreService(370): Chore service for: regionserver/85bef17d9292:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T15:54:09,981 INFO 
[RS:1;85bef17d9292:43623 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T15:54:09,981 INFO [RS:1;85bef17d9292:43623 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T15:54:09,981 INFO [RS:1;85bef17d9292:43623 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T15:54:09,981 INFO [regionserver/85bef17d9292:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T15:54:09,982 INFO [RS:1;85bef17d9292:43623 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T15:54:09,982 INFO [RS:1;85bef17d9292:43623 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43623 2024-12-06T15:54:09,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/85bef17d9292,43623,1733500446341 2024-12-06T15:54:09,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T15:54:09,986 INFO [RS:1;85bef17d9292:43623 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T15:54:09,989 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [85bef17d9292,43623,1733500446341] 2024-12-06T15:54:09,991 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/default/TestHBaseWalOnEC/0012bf01c3f0ccf92eb4034db397cd47/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-06T15:54:09,991 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/85bef17d9292,43623,1733500446341 already deleted, retry=false 2024-12-06T15:54:09,991 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 85bef17d9292,43623,1733500446341 expired; onlineServers=2 2024-12-06T15:54:09,994 INFO [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 2024-12-06T15:54:09,994 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0012bf01c3f0ccf92eb4034db397cd47: Waiting for close lock at 1733500449957Running coprocessor pre-close hooks at 1733500449957Disabling compacts and flushes for region at 1733500449957Disabling writes for close at 1733500449957Writing region close event to WAL at 1733500449968 (+11 ms)Running coprocessor post-close hooks at 1733500449992 (+24 ms)Closed at 1733500449994 (+2 ms) 2024-12-06T15:54:09,994 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47. 
2024-12-06T15:54:09,995 INFO [regionserver/85bef17d9292:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:10,001 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/.tmp/info/179f00785a4b44c1b10ae14bde65fea3 is 153, key is TestHBaseWalOnEC,,1733500448600.0012bf01c3f0ccf92eb4034db397cd47./info:regioninfo/1733500449049/Put/seqid=0 2024-12-06T15:54:10,004 WARN [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,005 WARN [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,010 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_882497012_22 at /127.0.0.1:52352 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33463:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52352 dst: /127.0.0.1:33463 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:10,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-06T15:54:10,015 WARN [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-06T15:54:10,016 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/.tmp/info/179f00785a4b44c1b10ae14bde65fea3 2024-12-06T15:54:10,046 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/.tmp/ns/c07dee6695ac4c8686576b89345a12de is 43, key is default/ns:d/1733500448415/Put/seqid=0 2024-12-06T15:54:10,049 WARN [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,049 WARN [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-06T15:54:10,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-06T15:54:10,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-06T15:54:10,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-06T15:54:10,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-06T15:54:10,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-06T15:54:10,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-06T15:54:10,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-06T15:54:10,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_882497012_22 at /127.0.0.1:36310 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:38889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36310 dst: /127.0.0.1:38889 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:10,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-06T15:54:10,070 WARN [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T15:54:10,071 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/.tmp/ns/c07dee6695ac4c8686576b89345a12de 2024-12-06T15:54:10,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:10,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43623-0x100680958f00002, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:10,090 INFO [RS:1;85bef17d9292:43623 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T15:54:10,090 INFO [RS:1;85bef17d9292:43623 {}] regionserver.HRegionServer(1031): Exiting; stopping=85bef17d9292,43623,1733500446341; zookeeper connection closed. 2024-12-06T15:54:10,090 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@63da0447 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@63da0447 2024-12-06T15:54:10,099 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/.tmp/table/826ff71905b14accba43dafd8370a263 is 52, key is TestHBaseWalOnEC/table:state/1733500449076/Put/seqid=0 2024-12-06T15:54:10,101 WARN [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,101 WARN [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_882497012_22 at /127.0.0.1:52416 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:33463:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52416 dst: /127.0.0.1:33463 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:10,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-06T15:54:10,109 WARN [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-06T15:54:10,109 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/.tmp/table/826ff71905b14accba43dafd8370a263 2024-12-06T15:54:10,121 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/.tmp/info/179f00785a4b44c1b10ae14bde65fea3 as hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/info/179f00785a4b44c1b10ae14bde65fea3 2024-12-06T15:54:10,132 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/info/179f00785a4b44c1b10ae14bde65fea3, entries=10, sequenceid=11, filesize=6.5 K 2024-12-06T15:54:10,134 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/.tmp/ns/c07dee6695ac4c8686576b89345a12de as hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/ns/c07dee6695ac4c8686576b89345a12de 2024-12-06T15:54:10,143 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/ns/c07dee6695ac4c8686576b89345a12de, entries=2, sequenceid=11, filesize=5.0 K 2024-12-06T15:54:10,145 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/.tmp/table/826ff71905b14accba43dafd8370a263 as hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/table/826ff71905b14accba43dafd8370a263 2024-12-06T15:54:10,155 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/table/826ff71905b14accba43dafd8370a263, entries=2, sequenceid=11, filesize=5.1 K 2024-12-06T15:54:10,157 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 198ms, sequenceid=11, compaction requested=false 2024-12-06T15:54:10,157 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T15:54:10,158 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(976): stopping server 85bef17d9292,34343,1733500446227; all regions closed. 
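The HRegionFileSystem "Committing .tmp/... as ..." lines followed by HStore "Added ..., entries=..., filesize=..." show the flush commit: each flushed HFile is first written under the region's .tmp directory and then moved into the column-family directory, so readers only ever see complete files. Below is a schematic sketch of that write-then-rename pattern on a Hadoop FileSystem; it illustrates the pattern rather than HBase's actual implementation, and the paths are shortened examples.

```java
// Sketch only: stage a file under .tmp, then publish it with a single rename.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitByRename {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/demo/region/.tmp/info/flushfile");  // staging location
    Path dst = new Path("/demo/region/info/flushfile");       // final store location

    // Step 1: write the new file under .tmp, invisible to readers of the store directory.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF("flushed cells would go here");
    }

    // Step 2: move it into the column-family directory in one rename, so readers never
    // observe a partially written file.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```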
2024-12-06T15:54:10,158 DEBUG [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-06T15:54:10,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_1073741828_1018 (size=1298) 2024-12-06T15:54:10,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_1073741828_1018 (size=1298) 2024-12-06T15:54:10,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741828_1018 (size=1298) 2024-12-06T15:54:10,167 DEBUG [RS:0;85bef17d9292:34343 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/oldWALs 2024-12-06T15:54:10,167 INFO [RS:0;85bef17d9292:34343 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 85bef17d9292%2C34343%2C1733500446227:(num 1733500447730) 2024-12-06T15:54:10,167 DEBUG [RS:0;85bef17d9292:34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:10,167 INFO [RS:0;85bef17d9292:34343 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:10,167 INFO [RS:0;85bef17d9292:34343 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T15:54:10,167 INFO [RS:0;85bef17d9292:34343 {}] hbase.ChoreService(370): Chore service for: regionserver/85bef17d9292:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T15:54:10,168 INFO [RS:0;85bef17d9292:34343 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T15:54:10,168 INFO [regionserver/85bef17d9292:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T15:54:10,168 INFO [RS:0;85bef17d9292:34343 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T15:54:10,168 INFO [RS:0;85bef17d9292:34343 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-06T15:54:10,168 INFO [RS:0;85bef17d9292:34343 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T15:54:10,168 INFO [RS:0;85bef17d9292:34343 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34343 2024-12-06T15:54:10,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/85bef17d9292,34343,1733500446227 2024-12-06T15:54:10,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T15:54:10,171 INFO [RS:0;85bef17d9292:34343 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T15:54:10,173 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-06T15:54:10,173 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [85bef17d9292,34343,1733500446227] 2024-12-06T15:54:10,176 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T15:54:10,176 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T15:54:10,176 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733500449958Running coprocessor pre-close hooks at 1733500449958Disabling compacts and flushes for region at 1733500449958Disabling writes for close at 1733500449958Obtaining lock to block concurrent updates at 1733500449958Preparing flush snapshotting stores in 1588230740 at 1733500449958Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733500449959 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733500449961 (+2 ms)Flushing 1588230740/info: creating writer at 1733500449964 (+3 ms)Flushing 1588230740/info: appending metadata at 1733500449998 (+34 ms)Flushing 1588230740/info: closing flushed file at 1733500449998Flushing 1588230740/ns: creating writer at 1733500450027 (+29 ms)Flushing 1588230740/ns: appending metadata at 1733500450045 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1733500450045Flushing 1588230740/table: creating writer at 1733500450081 (+36 ms)Flushing 1588230740/table: appending metadata at 1733500450098 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733500450098Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1db65e97: reopening flushed file at 1733500450120 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d08c4a0: reopening flushed file at 1733500450132 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a5f8543: reopening flushed file at 1733500450143 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 
1588230740 in 198ms, sequenceid=11, compaction requested=false at 1733500450157 (+14 ms)Writing region close event to WAL at 1733500450159 (+2 ms)Running coprocessor post-close hooks at 1733500450176 (+17 ms)Closed at 1733500450176 2024-12-06T15:54:10,177 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T15:54:10,177 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/85bef17d9292,34343,1733500446227 already deleted, retry=false 2024-12-06T15:54:10,177 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 85bef17d9292,34343,1733500446227 expired; onlineServers=1 2024-12-06T15:54:10,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-06T15:54:10,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-06T15:54:10,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-06T15:54:10,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-06T15:54:10,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:10,273 INFO [RS:0;85bef17d9292:34343 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T15:54:10,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34343-0x100680958f00001, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:10,274 INFO [RS:0;85bef17d9292:34343 {}] regionserver.HRegionServer(1031): Exiting; stopping=85bef17d9292,34343,1733500446227; zookeeper connection closed. 2024-12-06T15:54:10,274 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@30205bbe {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@30205bbe 2024-12-06T15:54:10,358 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(976): stopping server 85bef17d9292,42001,1733500446385; all regions closed. 
2024-12-06T15:54:10,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_1073741829_1019 (size=2751) 2024-12-06T15:54:10,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_1073741829_1019 (size=2751) 2024-12-06T15:54:10,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741829_1019 (size=2751) 2024-12-06T15:54:10,368 DEBUG [RS:2;85bef17d9292:42001 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/oldWALs 2024-12-06T15:54:10,368 INFO [RS:2;85bef17d9292:42001 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 85bef17d9292%2C42001%2C1733500446385.meta:.meta(num 1733500448214) 2024-12-06T15:54:10,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_1073741826_1016 (size=93) 2024-12-06T15:54:10,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_1073741826_1016 (size=93) 2024-12-06T15:54:10,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741826_1016 (size=93) 2024-12-06T15:54:10,375 DEBUG [RS:2;85bef17d9292:42001 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/oldWALs 2024-12-06T15:54:10,375 INFO [RS:2;85bef17d9292:42001 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 85bef17d9292%2C42001%2C1733500446385:(num 1733500447730) 2024-12-06T15:54:10,375 DEBUG [RS:2;85bef17d9292:42001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:10,375 INFO [RS:2;85bef17d9292:42001 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:10,375 INFO [RS:2;85bef17d9292:42001 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T15:54:10,376 INFO [RS:2;85bef17d9292:42001 {}] hbase.ChoreService(370): Chore service for: regionserver/85bef17d9292:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T15:54:10,376 INFO [RS:2;85bef17d9292:42001 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T15:54:10,376 INFO [regionserver/85bef17d9292:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T15:54:10,377 INFO [RS:2;85bef17d9292:42001 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42001 2024-12-06T15:54:10,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/85bef17d9292,42001,1733500446385 2024-12-06T15:54:10,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T15:54:10,379 INFO [RS:2;85bef17d9292:42001 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T15:54:10,380 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [85bef17d9292,42001,1733500446385] 2024-12-06T15:54:10,382 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/85bef17d9292,42001,1733500446385 already deleted, retry=false 2024-12-06T15:54:10,382 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 85bef17d9292,42001,1733500446385 expired; onlineServers=0 2024-12-06T15:54:10,383 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '85bef17d9292,42661,1733500445489' ***** 2024-12-06T15:54:10,383 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T15:54:10,383 INFO [M:0;85bef17d9292:42661 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T15:54:10,383 INFO [M:0;85bef17d9292:42661 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T15:54:10,383 DEBUG [M:0;85bef17d9292:42661 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T15:54:10,383 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T15:54:10,383 DEBUG [M:0;85bef17d9292:42661 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T15:54:10,383 DEBUG [master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.large.0-1733500447397 {}] cleaner.HFileCleaner(306): Exit Thread[master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.large.0-1733500447397,5,FailOnTimeoutGroup] 2024-12-06T15:54:10,384 INFO [M:0;85bef17d9292:42661 {}] hbase.ChoreService(370): Chore service for: master/85bef17d9292:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T15:54:10,384 INFO [M:0;85bef17d9292:42661 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T15:54:10,384 DEBUG [M:0;85bef17d9292:42661 {}] master.HMaster(1795): Stopping service threads 2024-12-06T15:54:10,384 INFO [M:0;85bef17d9292:42661 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T15:54:10,384 DEBUG [master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.small.0-1733500447404 {}] cleaner.HFileCleaner(306): Exit Thread[master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.small.0-1733500447404,5,FailOnTimeoutGroup] 2024-12-06T15:54:10,384 INFO [M:0;85bef17d9292:42661 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T15:54:10,385 INFO [M:0;85bef17d9292:42661 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T15:54:10,385 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T15:54:10,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T15:54:10,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:10,389 DEBUG [M:0;85bef17d9292:42661 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-06T15:54:10,389 DEBUG [M:0;85bef17d9292:42661 {}] master.ActiveMasterManager(353): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-06T15:54:10,390 INFO [M:0;85bef17d9292:42661 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/.lastflushedseqids 2024-12-06T15:54:10,403 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,403 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-06T15:54:10,412 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:36386 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:38889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36386 dst: /127.0.0.1:38889 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:10,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-06T15:54:10,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:10,482 INFO [RS:2;85bef17d9292:42001 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T15:54:10,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42001-0x100680958f00003, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:10,482 INFO [RS:2;85bef17d9292:42001 {}] regionserver.HRegionServer(1031): Exiting; stopping=85bef17d9292,42001,1733500446385; zookeeper connection closed. 2024-12-06T15:54:10,482 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@20416ba6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@20416ba6 2024-12-06T15:54:10,483 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-06T15:54:10,821 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-06T15:54:10,821 INFO [M:0;85bef17d9292:42661 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T15:54:10,822 INFO [M:0;85bef17d9292:42661 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T15:54:10,822 DEBUG [M:0;85bef17d9292:42661 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T15:54:10,822 INFO [M:0;85bef17d9292:42661 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:10,822 DEBUG [M:0;85bef17d9292:42661 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:10,822 DEBUG [M:0;85bef17d9292:42661 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T15:54:10,822 DEBUG [M:0;85bef17d9292:42661 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:10,823 INFO [M:0;85bef17d9292:42661 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-06T15:54:10,851 DEBUG [M:0;85bef17d9292:42661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aece3e8de7c247ba8daa671f8c8b3259 is 82, key is hbase:meta,,1/info:regioninfo/1733500448346/Put/seqid=0 2024-12-06T15:54:10,853 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,853 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,856 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:36398 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:38889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36398 dst: /127.0.0.1:38889 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:10,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-06T15:54:10,861 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T15:54:10,861 INFO [M:0;85bef17d9292:42661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aece3e8de7c247ba8daa671f8c8b3259 2024-12-06T15:54:10,888 DEBUG [M:0;85bef17d9292:42661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/25c3d79a8d5c4404829e6cb8c5333563 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733500449084/Put/seqid=0 2024-12-06T15:54:10,891 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,891 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,894 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:36420 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:38889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36420 dst: /127.0.0.1:38889 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:10,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_-9223372036854775552_1037 (size=6438) 2024-12-06T15:54:10,898 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T15:54:10,899 INFO [M:0;85bef17d9292:42661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/25c3d79a8d5c4404829e6cb8c5333563 2024-12-06T15:54:10,924 DEBUG [M:0;85bef17d9292:42661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e9fb48af52e24921b27d0b91e2e41980 is 69, key is 85bef17d9292,34343,1733500446227/rs:state/1733500447437/Put/seqid=0 2024-12-06T15:54:10,926 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,926 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T15:54:10,928 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_744356302_22 at /127.0.0.1:52456 [Receiving block BP-1561492056-172.17.0.2-1733500442151:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33463:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52456 dst: /127.0.0.1:33463 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T15:54:10,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-06T15:54:10,932 WARN [M:0;85bef17d9292:42661 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T15:54:10,933 INFO [M:0;85bef17d9292:42661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e9fb48af52e24921b27d0b91e2e41980 2024-12-06T15:54:10,942 DEBUG [M:0;85bef17d9292:42661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aece3e8de7c247ba8daa671f8c8b3259 as hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aece3e8de7c247ba8daa671f8c8b3259 2024-12-06T15:54:10,950 INFO [M:0;85bef17d9292:42661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aece3e8de7c247ba8daa671f8c8b3259, entries=8, sequenceid=72, filesize=5.5 K 2024-12-06T15:54:10,951 DEBUG [M:0;85bef17d9292:42661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/25c3d79a8d5c4404829e6cb8c5333563 as hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/25c3d79a8d5c4404829e6cb8c5333563 2024-12-06T15:54:10,958 INFO [M:0;85bef17d9292:42661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/25c3d79a8d5c4404829e6cb8c5333563, entries=8, sequenceid=72, filesize=6.3 K 2024-12-06T15:54:10,960 DEBUG [M:0;85bef17d9292:42661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e9fb48af52e24921b27d0b91e2e41980 as hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e9fb48af52e24921b27d0b91e2e41980 2024-12-06T15:54:10,967 INFO [M:0;85bef17d9292:42661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e9fb48af52e24921b27d0b91e2e41980, entries=3, sequenceid=72, filesize=5.2 K 2024-12-06T15:54:10,969 INFO 
[M:0;85bef17d9292:42661 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=72, compaction requested=false 2024-12-06T15:54:10,970 INFO [M:0;85bef17d9292:42661 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:10,971 DEBUG [M:0;85bef17d9292:42661 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733500450822Disabling compacts and flushes for region at 1733500450822Disabling writes for close at 1733500450822Obtaining lock to block concurrent updates at 1733500450823 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733500450823Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733500450823Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733500450824 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733500450825 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733500450850 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733500450850Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733500450870 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733500450888 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733500450888Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733500450907 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733500450923 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733500450923Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79fbe35e: reopening flushed file at 1733500450941 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f046c48: reopening flushed file at 1733500450950 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fa6da8f: reopening flushed file at 1733500450959 (+9 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=72, compaction requested=false at 1733500450969 (+10 ms)Writing region close event to WAL at 1733500450970 (+1 ms)Closed at 1733500450970 2024-12-06T15:54:10,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741825_1011 (size=32665) 2024-12-06T15:54:10,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33463 is added to blk_1073741825_1011 (size=32665) 2024-12-06T15:54:10,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38889 is added to blk_1073741825_1011 (size=32665) 2024-12-06T15:54:10,976 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T15:54:10,976 INFO [M:0;85bef17d9292:42661 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-06T15:54:10,976 INFO [M:0;85bef17d9292:42661 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42661 2024-12-06T15:54:10,976 INFO [M:0;85bef17d9292:42661 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T15:54:11,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:11,079 INFO [M:0;85bef17d9292:42661 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T15:54:11,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42661-0x100680958f00000, quorum=127.0.0.1:62826, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:11,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:11,088 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T15:54:11,088 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T15:54:11,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T15:54:11,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir/,STOPPED} 2024-12-06T15:54:11,091 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T15:54:11,091 WARN [BP-1561492056-172.17.0.2-1733500442151 heartbeating to localhost/127.0.0.1:40849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T15:54:11,091 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T15:54:11,091 WARN [BP-1561492056-172.17.0.2-1733500442151 heartbeating to localhost/127.0.0.1:40849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1561492056-172.17.0.2-1733500442151 (Datanode Uuid d113bed3-53f6-4770-b39f-4a6bee037dce) service to localhost/127.0.0.1:40849 2024-12-06T15:54:11,092 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data5/current/BP-1561492056-172.17.0.2-1733500442151 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:11,093 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data6/current/BP-1561492056-172.17.0.2-1733500442151 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:11,093 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T15:54:11,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:11,102 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T15:54:11,102 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T15:54:11,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T15:54:11,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir/,STOPPED} 2024-12-06T15:54:11,104 WARN [BP-1561492056-172.17.0.2-1733500442151 heartbeating to localhost/127.0.0.1:40849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T15:54:11,104 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T15:54:11,104 WARN [BP-1561492056-172.17.0.2-1733500442151 heartbeating to localhost/127.0.0.1:40849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1561492056-172.17.0.2-1733500442151 (Datanode Uuid 70a35f20-8dd5-437c-b4a1-eb06be178832) service to localhost/127.0.0.1:40849 2024-12-06T15:54:11,104 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T15:54:11,104 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data3/current/BP-1561492056-172.17.0.2-1733500442151 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:11,105 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data4/current/BP-1561492056-172.17.0.2-1733500442151 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:11,105 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T15:54:11,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:11,108 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T15:54:11,108 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T15:54:11,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T15:54:11,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir/,STOPPED} 2024-12-06T15:54:11,109 WARN [BP-1561492056-172.17.0.2-1733500442151 heartbeating to localhost/127.0.0.1:40849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T15:54:11,110 WARN [BP-1561492056-172.17.0.2-1733500442151 heartbeating to localhost/127.0.0.1:40849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1561492056-172.17.0.2-1733500442151 (Datanode Uuid 91c46be6-ebf4-43d1-af0e-0c4c0673d55d) service to localhost/127.0.0.1:40849 2024-12-06T15:54:11,110 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data1/current/BP-1561492056-172.17.0.2-1733500442151 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:11,110 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/cluster_64616e74-77ab-5946-7884-d112f7cf2148/data/data2/current/BP-1561492056-172.17.0.2-1733500442151 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:11,111 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T15:54:11,111 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T15:54:11,111 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T15:54:11,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T15:54:11,121 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T15:54:11,121 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T15:54:11,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T15:54:11,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir/,STOPPED} 2024-12-06T15:54:11,131 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T15:54:11,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T15:54:11,178 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 158), OpenFileDescriptor=449 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=249 (was 253), ProcessCount=11 (was 11), AvailableMemoryMB=9297 (was 9600) 2024-12-06T15:54:11,186 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=249, ProcessCount=11, AvailableMemoryMB=9297 2024-12-06T15:54:11,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T15:54:11,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.log.dir so I do NOT create it in target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5 2024-12-06T15:54:11,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7f994d9c-44e8-8f19-6358-34f57b1d3f26/hadoop.tmp.dir so I do NOT create it in target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5 2024-12-06T15:54:11,187 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455, deleteOnExit=true 2024-12-06T15:54:11,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-06T15:54:11,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/test.cache.data in system properties and HBase conf 2024-12-06T15:54:11,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T15:54:11,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.log.dir in system properties and HBase conf 2024-12-06T15:54:11,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T15:54:11,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T15:54:11,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-06T15:54:11,188 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T15:54:11,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T15:54:11,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T15:54:11,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T15:54:11,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T15:54:11,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T15:54:11,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T15:54:11,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T15:54:11,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T15:54:11,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T15:54:11,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/nfs.dump.dir in system properties and HBase conf 2024-12-06T15:54:11,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/java.io.tmpdir in system properties and HBase conf 2024-12-06T15:54:11,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T15:54:11,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T15:54:11,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T15:54:11,276 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:11,281 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T15:54:11,284 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T15:54:11,284 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T15:54:11,284 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T15:54:11,285 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:11,288 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49b2b984{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.log.dir/,AVAILABLE} 2024-12-06T15:54:11,288 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@715f09c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T15:54:11,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b25f894{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/java.io.tmpdir/jetty-localhost-39283-hadoop-hdfs-3_4_1-tests_jar-_-any-8180956807840283897/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T15:54:11,415 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@53797dc3{HTTP/1.1, (http/1.1)}{localhost:39283} 2024-12-06T15:54:11,415 INFO [Time-limited test {}] server.Server(415): Started @11346ms 2024-12-06T15:54:11,524 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:11,531 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T15:54:11,532 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T15:54:11,532 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T15:54:11,532 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T15:54:11,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a6744cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.log.dir/,AVAILABLE} 2024-12-06T15:54:11,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c2dd4e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T15:54:11,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14abb266{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/java.io.tmpdir/jetty-localhost-35367-hadoop-hdfs-3_4_1-tests_jar-_-any-17151671196228106425/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:11,651 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@348ccaab{HTTP/1.1, (http/1.1)}{localhost:35367} 2024-12-06T15:54:11,651 INFO [Time-limited test {}] server.Server(415): Started @11581ms 2024-12-06T15:54:11,652 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T15:54:11,697 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:11,700 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T15:54:11,701 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T15:54:11,701 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T15:54:11,701 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T15:54:11,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69cb0b1f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.log.dir/,AVAILABLE} 2024-12-06T15:54:11,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f8ad177{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T15:54:11,766 WARN [Thread-530 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data1/current/BP-1078682521-172.17.0.2-1733500451221/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:11,767 WARN [Thread-531 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data2/current/BP-1078682521-172.17.0.2-1733500451221/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:11,793 WARN [Thread-509 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T15:54:11,796 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x827aa25eb8a7cd61 with lease ID 0xc97334bef6da30c5: Processing first storage report for DS-25e9c44b-b089-4741-ac6c-ec1c8e831097 from datanode DatanodeRegistration(127.0.0.1:42625, datanodeUuid=d5c72180-5c32-4d40-8f69-0c3d6e7df1e1, infoPort=33375, infoSecurePort=0, ipcPort=40747, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221) 2024-12-06T15:54:11,796 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x827aa25eb8a7cd61 with lease ID 0xc97334bef6da30c5: from storage DS-25e9c44b-b089-4741-ac6c-ec1c8e831097 node DatanodeRegistration(127.0.0.1:42625, datanodeUuid=d5c72180-5c32-4d40-8f69-0c3d6e7df1e1, infoPort=33375, infoSecurePort=0, ipcPort=40747, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:11,797 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x827aa25eb8a7cd61 with lease ID 0xc97334bef6da30c5: Processing first storage report for DS-849f5c2f-9479-4d3a-b35c-7b1900c6d61a from datanode DatanodeRegistration(127.0.0.1:42625, datanodeUuid=d5c72180-5c32-4d40-8f69-0c3d6e7df1e1, infoPort=33375, infoSecurePort=0, ipcPort=40747, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221) 2024-12-06T15:54:11,797 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x827aa25eb8a7cd61 with lease ID 0xc97334bef6da30c5: from storage DS-849f5c2f-9479-4d3a-b35c-7b1900c6d61a node DatanodeRegistration(127.0.0.1:42625, datanodeUuid=d5c72180-5c32-4d40-8f69-0c3d6e7df1e1, infoPort=33375, infoSecurePort=0, ipcPort=40747, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:11,818 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@609afea4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/java.io.tmpdir/jetty-localhost-43135-hadoop-hdfs-3_4_1-tests_jar-_-any-13896990780339095685/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:11,819 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@527d4f04{HTTP/1.1, (http/1.1)}{localhost:43135} 2024-12-06T15:54:11,819 INFO [Time-limited test {}] server.Server(415): Started @11749ms 2024-12-06T15:54:11,820 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T15:54:11,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T15:54:11,858 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T15:54:11,859 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T15:54:11,859 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T15:54:11,859 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T15:54:11,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7adc0795{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.log.dir/,AVAILABLE} 2024-12-06T15:54:11,860 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@739551bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T15:54:11,913 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data3/current/BP-1078682521-172.17.0.2-1733500451221/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:11,914 WARN [Thread-566 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data4/current/BP-1078682521-172.17.0.2-1733500451221/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:11,931 WARN [Thread-545 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T15:54:11,935 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6d25b1a0814243cf with lease ID 0xc97334bef6da30c6: Processing first storage report for DS-73bca483-367b-422e-abb0-4c5876567ab3 from datanode DatanodeRegistration(127.0.0.1:33193, datanodeUuid=b5652060-87ee-486d-8b83-0eccf5c09641, infoPort=41717, infoSecurePort=0, ipcPort=41207, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221) 2024-12-06T15:54:11,935 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d25b1a0814243cf with lease ID 0xc97334bef6da30c6: from storage DS-73bca483-367b-422e-abb0-4c5876567ab3 node DatanodeRegistration(127.0.0.1:33193, datanodeUuid=b5652060-87ee-486d-8b83-0eccf5c09641, infoPort=41717, infoSecurePort=0, ipcPort=41207, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:11,935 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6d25b1a0814243cf with lease ID 0xc97334bef6da30c6: Processing first storage report for DS-762d5be9-287c-4e48-a052-6324c8392c87 from datanode DatanodeRegistration(127.0.0.1:33193, datanodeUuid=b5652060-87ee-486d-8b83-0eccf5c09641, infoPort=41717, infoSecurePort=0, ipcPort=41207, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221) 2024-12-06T15:54:11,935 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d25b1a0814243cf with lease ID 0xc97334bef6da30c6: from storage DS-762d5be9-287c-4e48-a052-6324c8392c87 node DatanodeRegistration(127.0.0.1:33193, datanodeUuid=b5652060-87ee-486d-8b83-0eccf5c09641, infoPort=41717, infoSecurePort=0, ipcPort=41207, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:11,980 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25c162fe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/java.io.tmpdir/jetty-localhost-45615-hadoop-hdfs-3_4_1-tests_jar-_-any-16597681971792129513/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:11,981 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1da1746e{HTTP/1.1, (http/1.1)}{localhost:45615} 2024-12-06T15:54:11,981 INFO [Time-limited test {}] server.Server(415): Started @11911ms 2024-12-06T15:54:11,983 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
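The minicluster startup traced above is driven by HBaseTestingUtil with the StartMiniClusterOption named in the log (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1). Purely as orientation, a minimal sketch of how a test could request that topology follows; the class names come from the log lines themselves, but the builder methods used here are an assumption about the test-utility API, not code quoted from TestHBaseWalOnEC.

    // Sketch under assumptions: HBaseTestingUtil and StartMiniClusterOption are the classes
    // named in the log above; the builder calls are an assumed API, not the test's actual code.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterTopologySketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // numMasters=1 in the logged option
            .numRegionServers(3)  // numRegionServers=3
            .numDataNodes(3)      // numDataNodes=3
            .numZkServers(1)      // numZkServers=1
            .build();
        util.startMiniCluster(option);   // produces the "Starting up minicluster with option" line
        try {
          // test body would run against util.getConnection() here
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" line
        }
      }
    }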
2024-12-06T15:54:12,091 WARN [Thread-592 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data6/current/BP-1078682521-172.17.0.2-1733500451221/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:12,092 WARN [Thread-591 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data5/current/BP-1078682521-172.17.0.2-1733500451221/current, will proceed with Du for space computation calculation, 2024-12-06T15:54:12,110 WARN [Thread-580 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T15:54:12,113 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x34ce27958954999c with lease ID 0xc97334bef6da30c7: Processing first storage report for DS-5a29ad95-95ef-4912-8e48-a910989e75a8 from datanode DatanodeRegistration(127.0.0.1:34481, datanodeUuid=dea8f4c0-72bb-4080-a1e3-3f65858921dd, infoPort=44845, infoSecurePort=0, ipcPort=46527, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221) 2024-12-06T15:54:12,113 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x34ce27958954999c with lease ID 0xc97334bef6da30c7: from storage DS-5a29ad95-95ef-4912-8e48-a910989e75a8 node DatanodeRegistration(127.0.0.1:34481, datanodeUuid=dea8f4c0-72bb-4080-a1e3-3f65858921dd, infoPort=44845, infoSecurePort=0, ipcPort=46527, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:12,113 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x34ce27958954999c with lease ID 0xc97334bef6da30c7: Processing first storage report for DS-3e43ba64-a2c1-4bcf-bac0-99d4a48bcbc5 from datanode DatanodeRegistration(127.0.0.1:34481, datanodeUuid=dea8f4c0-72bb-4080-a1e3-3f65858921dd, infoPort=44845, infoSecurePort=0, ipcPort=46527, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221) 2024-12-06T15:54:12,113 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x34ce27958954999c with lease ID 0xc97334bef6da30c7: from storage DS-3e43ba64-a2c1-4bcf-bac0-99d4a48bcbc5 node DatanodeRegistration(127.0.0.1:34481, datanodeUuid=dea8f4c0-72bb-4080-a1e3-3f65858921dd, infoPort=44845, infoSecurePort=0, ipcPort=46527, storageInfo=lv=-57;cid=testClusterID;nsid=913692152;c=1733500451221), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T15:54:12,117 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5 2024-12-06T15:54:12,121 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/zookeeper_0, clientPort=54026, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T15:54:12,122 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54026 2024-12-06T15:54:12,123 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,125 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741825_1001 (size=7) 2024-12-06T15:54:12,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741825_1001 (size=7) 2024-12-06T15:54:12,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741825_1001 (size=7) 2024-12-06T15:54:12,141 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485 with version=8 2024-12-06T15:54:12,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40849/user/jenkins/test-data/985acda1-e3ca-4846-ebe0-f5f403d48972/hbase-staging 2024-12-06T15:54:12,144 INFO [Time-limited test {}] client.ConnectionUtils(128): master/85bef17d9292:0 server-side Connection retries=45 2024-12-06T15:54:12,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,144 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T15:54:12,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T15:54:12,144 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T15:54:12,145 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T15:54:12,145 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36243 2024-12-06T15:54:12,147 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36243 connecting to ZooKeeper ensemble=127.0.0.1:54026 2024-12-06T15:54:12,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:362430x0, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T15:54:12,153 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36243-0x100680975fe0000 connected 2024-12-06T15:54:12,176 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,178 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,180 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:12,180 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485, hbase.cluster.distributed=false 2024-12-06T15:54:12,182 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T15:54:12,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36243 2024-12-06T15:54:12,183 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36243 2024-12-06T15:54:12,183 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36243 2024-12-06T15:54:12,184 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36243 2024-12-06T15:54:12,184 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36243 2024-12-06T15:54:12,199 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/85bef17d9292:0 server-side Connection retries=45 2024-12-06T15:54:12,199 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,199 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,200 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T15:54:12,200 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,200 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T15:54:12,200 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T15:54:12,200 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T15:54:12,201 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44761 2024-12-06T15:54:12,202 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44761 connecting to ZooKeeper ensemble=127.0.0.1:54026 2024-12-06T15:54:12,203 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,205 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:447610x0, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T15:54:12,210 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44761-0x100680975fe0001 connected 2024-12-06T15:54:12,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:12,211 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T15:54:12,211 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T15:54:12,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T15:54:12,213 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T15:54:12,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44761 2024-12-06T15:54:12,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44761 2024-12-06T15:54:12,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44761 2024-12-06T15:54:12,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44761 2024-12-06T15:54:12,214 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44761 2024-12-06T15:54:12,230 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/85bef17d9292:0 server-side Connection retries=45 2024-12-06T15:54:12,230 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,230 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,230 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T15:54:12,230 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,230 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T15:54:12,230 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T15:54:12,230 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T15:54:12,231 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35415 2024-12-06T15:54:12,232 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35415 connecting to ZooKeeper ensemble=127.0.0.1:54026 2024-12-06T15:54:12,233 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:354150x0, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T15:54:12,240 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:354150x0, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:12,240 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35415-0x100680975fe0002 connected 2024-12-06T15:54:12,241 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T15:54:12,244 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T15:54:12,244 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
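The repeated "Set watcher on znode that does not yet exist, /hbase/running" and "/hbase/master" lines above come from HBase's ZKUtil. In plain ZooKeeper terms this is the exists()-with-watcher pattern: the watch is registered even when the znode is absent, so the client is notified once it is created. A small sketch with the stock ZooKeeper client follows; the ensemble address and znode path are reused from the log, but this is illustrative only and not HBase's ZKUtil implementation.

    // Plain ZooKeeper sketch of "set watcher on a znode that does not yet exist".
    // exists() registers the watch even when the node is absent. Not HBase's ZKUtil code.
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ExistsWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54026", 30000, (WatchedEvent e) -> {});
        Watcher runningWatcher =
            event -> System.out.println("event on /hbase/running: " + event.getType());
        // Returns null when the znode is absent, but the watch is still registered.
        if (zk.exists("/hbase/running", runningWatcher) == null) {
          System.out.println("znode does not yet exist; watch set");
        }
      }
    }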
2024-12-06T15:54:12,246 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T15:54:12,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35415 2024-12-06T15:54:12,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35415 2024-12-06T15:54:12,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35415 2024-12-06T15:54:12,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35415 2024-12-06T15:54:12,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35415 2024-12-06T15:54:12,271 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/85bef17d9292:0 server-side Connection retries=45 2024-12-06T15:54:12,272 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,272 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,272 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T15:54:12,272 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T15:54:12,272 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T15:54:12,272 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T15:54:12,272 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T15:54:12,273 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33825 2024-12-06T15:54:12,274 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33825 connecting to ZooKeeper ensemble=127.0.0.1:54026 2024-12-06T15:54:12,275 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338250x0, quorum=127.0.0.1:54026, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T15:54:12,282 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33825-0x100680975fe0003 connected 2024-12-06T15:54:12,282 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:12,282 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T15:54:12,283 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T15:54:12,283 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T15:54:12,284 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T15:54:12,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33825 2024-12-06T15:54:12,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33825 2024-12-06T15:54:12,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33825 2024-12-06T15:54:12,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33825 2024-12-06T15:54:12,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33825 2024-12-06T15:54:12,298 DEBUG [M:0;85bef17d9292:36243 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;85bef17d9292:36243 2024-12-06T15:54:12,298 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/85bef17d9292,36243,1733500452143 2024-12-06T15:54:12,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:12,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:12,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:12,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:12,301 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36243-0x100680975fe0000, 
quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/85bef17d9292,36243,1733500452143 2024-12-06T15:54:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T15:54:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T15:54:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,304 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T15:54:12,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T15:54:12,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,304 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/85bef17d9292,36243,1733500452143 from backup master directory 2024-12-06T15:54:12,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/85bef17d9292,36243,1733500452143 2024-12-06T15:54:12,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:12,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:12,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:12,306 WARN [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable 
HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T15:54:12,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T15:54:12,306 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=85bef17d9292,36243,1733500452143 2024-12-06T15:54:12,315 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/hbase.id] with ID: 9e762204-2b1b-4d97-a393-7efb59553d9e 2024-12-06T15:54:12,315 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/.tmp/hbase.id 2024-12-06T15:54:12,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741826_1002 (size=42) 2024-12-06T15:54:12,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741826_1002 (size=42) 2024-12-06T15:54:12,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741826_1002 (size=42) 2024-12-06T15:54:12,328 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/.tmp/hbase.id]:[hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/hbase.id] 2024-12-06T15:54:12,350 INFO [master/85bef17d9292:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T15:54:12,351 INFO [master/85bef17d9292:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T15:54:12,353 INFO [master/85bef17d9292:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
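The FSUtils lines just above show the cluster ID being written to a temporary .tmp path and then moved to its final location (hbase.id). As an illustration only, the same write-then-rename pattern can be expressed with the stock Hadoop FileSystem API; the path layout and helper below are assumptions for the sketch, not HBase's actual FSUtils code.

    // Illustrative temp-write-then-rename pattern, as suggested by the
    // FSUtils(625)/FSUtils(634) lines above. Not HBase's FSUtils implementation.
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void writeAtomically(FileSystem fs, Path target, String clusterId)
          throws IOException {
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8)); // write to the temporary location first
        }
        if (!fs.rename(tmp, target)) {                            // then move it into place
          throw new IOException("rename " + tmp + " -> " + target + " failed");
        }
      }
    }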
2024-12-06T15:54:12,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741827_1003 (size=196) 2024-12-06T15:54:12,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741827_1003 (size=196) 2024-12-06T15:54:12,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741827_1003 (size=196) 2024-12-06T15:54:12,373 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T15:54:12,373 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T15:54:12,374 INFO [master/85bef17d9292:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T15:54:12,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is 
added to blk_1073741828_1004 (size=1189) 2024-12-06T15:54:12,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741828_1004 (size=1189) 2024-12-06T15:54:12,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741828_1004 (size=1189) 2024-12-06T15:54:12,388 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store 2024-12-06T15:54:12,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741829_1005 (size=34) 2024-12-06T15:54:12,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741829_1005 (size=34) 2024-12-06T15:54:12,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741829_1005 (size=34) 2024-12-06T15:54:12,401 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:12,401 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T15:54:12,401 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:12,401 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T15:54:12,401 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T15:54:12,401 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:12,401 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:12,401 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733500452401Disabling compacts and flushes for region at 1733500452401Disabling writes for close at 1733500452401Writing region close event to WAL at 1733500452401Closed at 1733500452401 2024-12-06T15:54:12,402 WARN [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/.initializing 2024-12-06T15:54:12,402 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/WALs/85bef17d9292,36243,1733500452143 2024-12-06T15:54:12,407 INFO [master/85bef17d9292:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C36243%2C1733500452143, suffix=, logDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/WALs/85bef17d9292,36243,1733500452143, archiveDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/oldWALs, maxLogs=10 2024-12-06T15:54:12,408 INFO [master/85bef17d9292:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 85bef17d9292%2C36243%2C1733500452143.1733500452408 2024-12-06T15:54:12,422 INFO [master/85bef17d9292:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/WALs/85bef17d9292,36243,1733500452143/85bef17d9292%2C36243%2C1733500452143.1733500452408 2024-12-06T15:54:12,426 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33375:33375),(127.0.0.1/127.0.0.1:41717:41717),(127.0.0.1/127.0.0.1:44845:44845)] 2024-12-06T15:54:12,429 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T15:54:12,429 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:12,429 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,430 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,432 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,434 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T15:54:12,434 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:12,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:12,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,437 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T15:54:12,437 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:12,438 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T15:54:12,438 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T15:54:12,441 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:12,442 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T15:54:12,442 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,444 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T15:54:12,444 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:12,445 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T15:54:12,445 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,446 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,446 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,448 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,448 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,449 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T15:54:12,450 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T15:54:12,453 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T15:54:12,453 INFO [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75021803, jitterRate=0.11791197955608368}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T15:54:12,454 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733500452430Initializing all the Stores at 1733500452431 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500452431Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500452432 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500452432Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500452432Cleaning up temporary data from old regions at 1733500452448 (+16 ms)Region opened successfully at 1733500452454 (+6 ms) 2024-12-06T15:54:12,454 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T15:54:12,459 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42081378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=85bef17d9292/172.17.0.2:0 2024-12-06T15:54:12,461 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T15:54:12,461 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T15:54:12,461 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T15:54:12,461 INFO [master/85bef17d9292:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T15:54:12,462 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T15:54:12,463 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-06T15:54:12,463 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T15:54:12,466 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-06T15:54:12,467 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T15:54:12,468 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T15:54:12,469 INFO [master/85bef17d9292:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T15:54:12,469 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T15:54:12,471 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T15:54:12,471 INFO [master/85bef17d9292:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T15:54:12,472 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T15:54:12,473 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T15:54:12,474 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T15:54:12,476 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T15:54:12,478 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T15:54:12,480 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T15:54:12,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:12,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:12,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:12,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-06T15:54:12,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:12,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,485 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=85bef17d9292,36243,1733500452143, sessionid=0x100680975fe0000, setting cluster-up flag (Was=false) 2024-12-06T15:54:12,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,494 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T15:54:12,496 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=85bef17d9292,36243,1733500452143 2024-12-06T15:54:12,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,506 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T15:54:12,507 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=85bef17d9292,36243,1733500452143 2024-12-06T15:54:12,509 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T15:54:12,512 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T15:54:12,512 INFO [master/85bef17d9292:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T15:54:12,512 INFO [master/85bef17d9292:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-06T15:54:12,513 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 85bef17d9292,36243,1733500452143 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T15:54:12,514 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/85bef17d9292:0, corePoolSize=5, maxPoolSize=5 2024-12-06T15:54:12,514 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/85bef17d9292:0, corePoolSize=5, maxPoolSize=5 2024-12-06T15:54:12,514 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/85bef17d9292:0, corePoolSize=5, maxPoolSize=5 2024-12-06T15:54:12,514 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/85bef17d9292:0, corePoolSize=5, maxPoolSize=5 2024-12-06T15:54:12,514 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/85bef17d9292:0, corePoolSize=10, maxPoolSize=10 2024-12-06T15:54:12,514 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,514 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/85bef17d9292:0, corePoolSize=2, maxPoolSize=2 2024-12-06T15:54:12,515 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,519 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733500482519 2024-12-06T15:54:12,519 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T15:54:12,519 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T15:54:12,519 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T15:54:12,519 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T15:54:12,519 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T15:54:12,519 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T15:54:12,519 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,521 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T15:54:12,521 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T15:54:12,522 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:12,522 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T15:54:12,524 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T15:54:12,524 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T15:54:12,524 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T15:54:12,525 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T15:54:12,525 INFO [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T15:54:12,525 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.large.0-1733500452525,5,FailOnTimeoutGroup] 2024-12-06T15:54:12,525 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.small.0-1733500452525,5,FailOnTimeoutGroup] 2024-12-06T15:54:12,525 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,525 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T15:54:12,525 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,526 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741831_1007 (size=1321) 2024-12-06T15:54:12,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741831_1007 (size=1321) 2024-12-06T15:54:12,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741831_1007 (size=1321) 2024-12-06T15:54:12,540 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T15:54:12,540 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485 2024-12-06T15:54:12,556 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741832_1008 (size=32) 2024-12-06T15:54:12,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741832_1008 (size=32) 2024-12-06T15:54:12,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741832_1008 (size=32) 2024-12-06T15:54:12,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:12,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T15:54:12,563 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T15:54:12,563 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:12,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:12,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T15:54:12,565 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T15:54:12,565 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T15:54:12,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:12,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T15:54:12,567 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T15:54:12,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:12,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:12,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T15:54:12,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T15:54:12,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:12,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:12,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T15:54:12,572 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740 2024-12-06T15:54:12,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740 2024-12-06T15:54:12,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T15:54:12,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T15:54:12,576 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T15:54:12,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T15:54:12,580 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T15:54:12,581 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62911032, jitterRate=-0.0625525712966919}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T15:54:12,582 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733500452558Initializing all the Stores at 1733500452561 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500452561Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500452561Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500452561Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500452561Cleaning up temporary data from old regions at 1733500452575 (+14 ms)Region opened successfully at 1733500452582 (+7 ms) 2024-12-06T15:54:12,582 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
2024-12-06T15:54:12,582 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T15:54:12,582 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T15:54:12,582 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T15:54:12,582 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T15:54:12,582 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T15:54:12,583 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733500452582Disabling compacts and flushes for region at 1733500452582Disabling writes for close at 1733500452582Writing region close event to WAL at 1733500452582Closed at 1733500452582 2024-12-06T15:54:12,585 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T15:54:12,585 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T15:54:12,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T15:54:12,587 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T15:54:12,589 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T15:54:12,589 INFO [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(746): ClusterId : 9e762204-2b1b-4d97-a393-7efb59553d9e 2024-12-06T15:54:12,589 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T15:54:12,591 INFO [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(746): ClusterId : 9e762204-2b1b-4d97-a393-7efb59553d9e 2024-12-06T15:54:12,592 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T15:54:12,592 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T15:54:12,592 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T15:54:12,595 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(746): ClusterId : 9e762204-2b1b-4d97-a393-7efb59553d9e 2024-12-06T15:54:12,595 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T15:54:12,596 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T15:54:12,596 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-12-06T15:54:12,597 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T15:54:12,597 DEBUG [RS:0;85bef17d9292:44761 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8f2b44e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=85bef17d9292/172.17.0.2:0 2024-12-06T15:54:12,598 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T15:54:12,598 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T15:54:12,599 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T15:54:12,599 DEBUG [RS:2;85bef17d9292:33825 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4139ee18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=85bef17d9292/172.17.0.2:0 2024-12-06T15:54:12,601 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T15:54:12,602 DEBUG [RS:1;85bef17d9292:35415 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d22b101, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=85bef17d9292/172.17.0.2:0 2024-12-06T15:54:12,616 DEBUG [RS:1;85bef17d9292:35415 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;85bef17d9292:35415 2024-12-06T15:54:12,617 INFO [RS:1;85bef17d9292:35415 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T15:54:12,617 INFO [RS:1;85bef17d9292:35415 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T15:54:12,617 DEBUG [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T15:54:12,617 DEBUG [RS:2;85bef17d9292:33825 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;85bef17d9292:33825 2024-12-06T15:54:12,617 DEBUG [RS:0;85bef17d9292:44761 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;85bef17d9292:44761 2024-12-06T15:54:12,618 INFO [RS:0;85bef17d9292:44761 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T15:54:12,618 INFO [RS:2;85bef17d9292:33825 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T15:54:12,618 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(2659): reportForDuty to master=85bef17d9292,36243,1733500452143 with port=35415, startcode=1733500452229 2024-12-06T15:54:12,618 INFO [RS:2;85bef17d9292:33825 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T15:54:12,618 INFO [RS:0;85bef17d9292:44761 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T15:54:12,618 DEBUG [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-06T15:54:12,618 DEBUG [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T15:54:12,618 DEBUG [RS:1;85bef17d9292:35415 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T15:54:12,618 INFO [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(2659): reportForDuty to master=85bef17d9292,36243,1733500452143 with port=44761, startcode=1733500452199 2024-12-06T15:54:12,619 INFO [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(2659): reportForDuty to master=85bef17d9292,36243,1733500452143 with port=33825, startcode=1733500452271 2024-12-06T15:54:12,619 DEBUG [RS:0;85bef17d9292:44761 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T15:54:12,619 DEBUG [RS:2;85bef17d9292:33825 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T15:54:12,621 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42825, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T15:54:12,621 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36243 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 85bef17d9292,35415,1733500452229 2024-12-06T15:54:12,621 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36243 {}] master.ServerManager(517): Registering regionserver=85bef17d9292,35415,1733500452229 2024-12-06T15:54:12,621 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53167, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T15:54:12,621 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49043, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T15:54:12,623 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36243 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 85bef17d9292,33825,1733500452271 2024-12-06T15:54:12,623 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36243 {}] master.ServerManager(517): Registering regionserver=85bef17d9292,33825,1733500452271 2024-12-06T15:54:12,624 DEBUG [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485 2024-12-06T15:54:12,624 DEBUG [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40973 2024-12-06T15:54:12,624 DEBUG [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T15:54:12,625 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36243 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 85bef17d9292,44761,1733500452199 2024-12-06T15:54:12,626 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36243 {}] master.ServerManager(517): Registering regionserver=85bef17d9292,44761,1733500452199 2024-12-06T15:54:12,626 DEBUG [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485 2024-12-06T15:54:12,626 DEBUG [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40973 2024-12-06T15:54:12,626 DEBUG [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T15:54:12,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T15:54:12,627 DEBUG [RS:1;85bef17d9292:35415 {}] zookeeper.ZKUtil(111): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/85bef17d9292,35415,1733500452229 2024-12-06T15:54:12,627 WARN [RS:1;85bef17d9292:35415 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T15:54:12,627 INFO [RS:1;85bef17d9292:35415 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T15:54:12,627 DEBUG [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,35415,1733500452229 2024-12-06T15:54:12,628 DEBUG [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485 2024-12-06T15:54:12,628 DEBUG [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40973 2024-12-06T15:54:12,628 DEBUG [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T15:54:12,632 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [85bef17d9292,35415,1733500452229] 2024-12-06T15:54:12,632 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [85bef17d9292,33825,1733500452271] 2024-12-06T15:54:12,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T15:54:12,633 DEBUG [RS:2;85bef17d9292:33825 {}] zookeeper.ZKUtil(111): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/85bef17d9292,33825,1733500452271 2024-12-06T15:54:12,633 WARN [RS:2;85bef17d9292:33825 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T15:54:12,633 INFO [RS:2;85bef17d9292:33825 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T15:54:12,633 DEBUG [RS:0;85bef17d9292:44761 {}] zookeeper.ZKUtil(111): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/85bef17d9292,44761,1733500452199 2024-12-06T15:54:12,633 DEBUG [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,33825,1733500452271 2024-12-06T15:54:12,633 WARN [RS:0;85bef17d9292:44761 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T15:54:12,633 INFO [RS:0;85bef17d9292:44761 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T15:54:12,633 DEBUG [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,44761,1733500452199 2024-12-06T15:54:12,635 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [85bef17d9292,44761,1733500452199] 2024-12-06T15:54:12,635 INFO [RS:1;85bef17d9292:35415 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T15:54:12,637 INFO [RS:1;85bef17d9292:35415 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T15:54:12,638 INFO [RS:1;85bef17d9292:35415 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T15:54:12,638 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,638 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T15:54:12,639 INFO [RS:2;85bef17d9292:33825 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T15:54:12,641 INFO [RS:0;85bef17d9292:44761 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T15:54:12,641 INFO [RS:2;85bef17d9292:33825 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T15:54:12,642 INFO [RS:1;85bef17d9292:35415 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T15:54:12,642 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:12,642 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,642 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,642 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,642 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,642 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,642 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/85bef17d9292:0, corePoolSize=2, maxPoolSize=2 2024-12-06T15:54:12,642 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,642 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,643 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,643 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,643 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,643 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,643 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:12,643 DEBUG [RS:1;85bef17d9292:35415 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:12,651 INFO [RS:2;85bef17d9292:33825 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T15:54:12,651 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:12,651 INFO [RS:0;85bef17d9292:44761 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T15:54:12,651 INFO [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T15:54:12,651 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,652 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,652 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,652 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,652 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,652 INFO [RS:0;85bef17d9292:44761 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T15:54:12,652 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,35415,1733500452229-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T15:54:12,652 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,652 INFO [RS:2;85bef17d9292:33825 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T15:54:12,652 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:12,652 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/85bef17d9292:0, corePoolSize=2, maxPoolSize=2 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,653 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:12,654 DEBUG [RS:2;85bef17d9292:33825 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:12,655 INFO [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T15:54:12,656 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,656 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,656 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:12,656 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,656 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,656 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,33825,1733500452271-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T15:54:12,656 INFO [RS:0;85bef17d9292:44761 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T15:54:12,657 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/85bef17d9292:0, corePoolSize=2, maxPoolSize=2 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,657 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,658 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,658 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/85bef17d9292:0, corePoolSize=1, maxPoolSize=1 2024-12-06T15:54:12,658 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:12,658 DEBUG [RS:0;85bef17d9292:44761 {}] executor.ExecutorService(95): 
Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0, corePoolSize=3, maxPoolSize=3 2024-12-06T15:54:12,662 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,662 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,662 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,662 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,662 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,662 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,44761,1733500452199-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T15:54:12,674 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T15:54:12,674 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,35415,1733500452229-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,674 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,674 INFO [RS:1;85bef17d9292:35415 {}] regionserver.Replication(171): 85bef17d9292,35415,1733500452229 started 2024-12-06T15:54:12,678 INFO [RS:2;85bef17d9292:33825 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T15:54:12,678 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,33825,1733500452271-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,678 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,678 INFO [RS:2;85bef17d9292:33825 {}] regionserver.Replication(171): 85bef17d9292,33825,1733500452271 started 2024-12-06T15:54:12,685 INFO [RS:0;85bef17d9292:44761 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T15:54:12,685 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,44761,1733500452199-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,685 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,685 INFO [RS:0;85bef17d9292:44761 {}] regionserver.Replication(171): 85bef17d9292,44761,1733500452199 started 2024-12-06T15:54:12,695 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:12,696 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(1482): Serving as 85bef17d9292,35415,1733500452229, RpcServer on 85bef17d9292/172.17.0.2:35415, sessionid=0x100680975fe0002 2024-12-06T15:54:12,696 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T15:54:12,696 DEBUG [RS:1;85bef17d9292:35415 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 85bef17d9292,35415,1733500452229 2024-12-06T15:54:12,696 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,35415,1733500452229' 2024-12-06T15:54:12,696 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T15:54:12,697 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T15:54:12,697 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T15:54:12,697 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T15:54:12,697 DEBUG [RS:1;85bef17d9292:35415 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 85bef17d9292,35415,1733500452229 2024-12-06T15:54:12,698 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,35415,1733500452229' 2024-12-06T15:54:12,698 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T15:54:12,698 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T15:54:12,698 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,698 INFO [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(1482): Serving as 85bef17d9292,33825,1733500452271, RpcServer on 85bef17d9292/172.17.0.2:33825, sessionid=0x100680975fe0003 2024-12-06T15:54:12,699 DEBUG [RS:1;85bef17d9292:35415 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T15:54:12,699 INFO [RS:1;85bef17d9292:35415 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T15:54:12,699 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T15:54:12,699 INFO [RS:1;85bef17d9292:35415 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-06T15:54:12,699 DEBUG [RS:2;85bef17d9292:33825 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 85bef17d9292,33825,1733500452271 2024-12-06T15:54:12,699 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,33825,1733500452271' 2024-12-06T15:54:12,699 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T15:54:12,699 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T15:54:12,700 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T15:54:12,700 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T15:54:12,700 DEBUG [RS:2;85bef17d9292:33825 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 85bef17d9292,33825,1733500452271 2024-12-06T15:54:12,700 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,33825,1733500452271' 2024-12-06T15:54:12,700 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T15:54:12,700 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T15:54:12,701 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:12,701 INFO [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(1482): Serving as 85bef17d9292,44761,1733500452199, RpcServer on 85bef17d9292/172.17.0.2:44761, sessionid=0x100680975fe0001 2024-12-06T15:54:12,701 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T15:54:12,701 DEBUG [RS:0;85bef17d9292:44761 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 85bef17d9292,44761,1733500452199 2024-12-06T15:54:12,701 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,44761,1733500452199' 2024-12-06T15:54:12,701 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T15:54:12,701 DEBUG [RS:2;85bef17d9292:33825 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T15:54:12,701 INFO [RS:2;85bef17d9292:33825 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T15:54:12,701 INFO [RS:2;85bef17d9292:33825 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-06T15:54:12,702 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T15:54:12,702 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T15:54:12,702 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T15:54:12,702 DEBUG [RS:0;85bef17d9292:44761 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 85bef17d9292,44761,1733500452199 2024-12-06T15:54:12,702 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '85bef17d9292,44761,1733500452199' 2024-12-06T15:54:12,702 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T15:54:12,703 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T15:54:12,703 DEBUG [RS:0;85bef17d9292:44761 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T15:54:12,703 INFO [RS:0;85bef17d9292:44761 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T15:54:12,703 INFO [RS:0;85bef17d9292:44761 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T15:54:12,739 WARN [85bef17d9292:36243 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-06T15:54:12,802 INFO [RS:1;85bef17d9292:35415 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C35415%2C1733500452229, suffix=, logDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,35415,1733500452229, archiveDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/oldWALs, maxLogs=32 2024-12-06T15:54:12,804 INFO [RS:1;85bef17d9292:35415 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 85bef17d9292%2C35415%2C1733500452229.1733500452803 2024-12-06T15:54:12,804 INFO [RS:2;85bef17d9292:33825 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C33825%2C1733500452271, suffix=, logDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,33825,1733500452271, archiveDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/oldWALs, maxLogs=32 2024-12-06T15:54:12,806 INFO [RS:0;85bef17d9292:44761 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C44761%2C1733500452199, suffix=, logDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,44761,1733500452199, archiveDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/oldWALs, maxLogs=32 2024-12-06T15:54:12,806 INFO [RS:2;85bef17d9292:33825 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 85bef17d9292%2C33825%2C1733500452271.1733500452806 2024-12-06T15:54:12,808 INFO [RS:0;85bef17d9292:44761 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 85bef17d9292%2C44761%2C1733500452199.1733500452807 2024-12-06T15:54:12,816 INFO [RS:1;85bef17d9292:35415 {}] wal.AbstractFSWAL(991): 
New WAL /user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,35415,1733500452229/85bef17d9292%2C35415%2C1733500452229.1733500452803 2024-12-06T15:54:12,818 INFO [RS:2;85bef17d9292:33825 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,33825,1733500452271/85bef17d9292%2C33825%2C1733500452271.1733500452806 2024-12-06T15:54:12,819 DEBUG [RS:1;85bef17d9292:35415 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41717:41717),(127.0.0.1/127.0.0.1:44845:44845),(127.0.0.1/127.0.0.1:33375:33375)] 2024-12-06T15:54:12,820 INFO [RS:0;85bef17d9292:44761 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,44761,1733500452199/85bef17d9292%2C44761%2C1733500452199.1733500452807 2024-12-06T15:54:12,820 DEBUG [RS:2;85bef17d9292:33825 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41717:41717),(127.0.0.1/127.0.0.1:44845:44845),(127.0.0.1/127.0.0.1:33375:33375)] 2024-12-06T15:54:12,822 DEBUG [RS:0;85bef17d9292:44761 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44845:44845),(127.0.0.1/127.0.0.1:33375:33375),(127.0.0.1/127.0.0.1:41717:41717)] 2024-12-06T15:54:12,989 DEBUG [85bef17d9292:36243 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-06T15:54:12,990 DEBUG [85bef17d9292:36243 {}] balancer.BalancerClusterState(204): Hosts are {85bef17d9292=0} racks are {/default-rack=0} 2024-12-06T15:54:12,992 DEBUG [85bef17d9292:36243 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-06T15:54:12,992 DEBUG [85bef17d9292:36243 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-06T15:54:12,992 DEBUG [85bef17d9292:36243 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-06T15:54:12,992 DEBUG [85bef17d9292:36243 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-06T15:54:12,992 DEBUG [85bef17d9292:36243 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-06T15:54:12,992 DEBUG [85bef17d9292:36243 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-06T15:54:12,992 INFO [85bef17d9292:36243 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-06T15:54:12,992 INFO [85bef17d9292:36243 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-06T15:54:12,992 INFO [85bef17d9292:36243 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-06T15:54:12,992 DEBUG [85bef17d9292:36243 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-06T15:54:12,993 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=85bef17d9292,35415,1733500452229 2024-12-06T15:54:12,995 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 85bef17d9292,35415,1733500452229, state=OPENING 2024-12-06T15:54:12,997 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T15:54:12,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-06T15:54:12,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:12,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:13,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:13,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:13,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:13,000 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T15:54:13,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:13,000 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=85bef17d9292,35415,1733500452229}] 2024-12-06T15:54:13,154 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T15:54:13,156 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60395, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T15:54:13,161 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T15:54:13,161 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T15:54:13,164 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=85bef17d9292%2C35415%2C1733500452229.meta, suffix=.meta, logDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,35415,1733500452229, archiveDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/oldWALs, maxLogs=32 2024-12-06T15:54:13,165 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 85bef17d9292%2C35415%2C1733500452229.meta.1733500453165.meta 2024-12-06T15:54:13,176 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/WALs/85bef17d9292,35415,1733500452229/85bef17d9292%2C35415%2C1733500452229.meta.1733500453165.meta 2024-12-06T15:54:13,180 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41717:41717),(127.0.0.1/127.0.0.1:33375:33375),(127.0.0.1/127.0.0.1:44845:44845)] 2024-12-06T15:54:13,185 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T15:54:13,185 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T15:54:13,185 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T15:54:13,185 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T15:54:13,186 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T15:54:13,186 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:13,186 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T15:54:13,186 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T15:54:13,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T15:54:13,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T15:54:13,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:13,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:13,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T15:54:13,191 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T15:54:13,191 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:13,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:13,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T15:54:13,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T15:54:13,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:13,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:13,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T15:54:13,194 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T15:54:13,194 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:13,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T15:54:13,195 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T15:54:13,196 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740 2024-12-06T15:54:13,197 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740 2024-12-06T15:54:13,199 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T15:54:13,199 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T15:54:13,200 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-06T15:54:13,202 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T15:54:13,203 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59173047, jitterRate=-0.11825288832187653}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T15:54:13,203 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T15:54:13,205 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733500453186Writing region info on filesystem at 1733500453186Initializing all the Stores at 1733500453187 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500453187Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500453188 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500453188Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733500453188Cleaning up temporary data from old regions at 1733500453199 (+11 ms)Running coprocessor post-open hooks at 1733500453203 (+4 ms)Region opened successfully at 1733500453204 (+1 ms) 2024-12-06T15:54:13,206 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733500453154 2024-12-06T15:54:13,210 DEBUG [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T15:54:13,210 INFO [RS_OPEN_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T15:54:13,211 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=85bef17d9292,35415,1733500452229 2024-12-06T15:54:13,213 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 85bef17d9292,35415,1733500452229, state=OPEN 2024-12-06T15:54:13,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T15:54:13,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T15:54:13,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T15:54:13,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T15:54:13,215 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=85bef17d9292,35415,1733500452229 2024-12-06T15:54:13,215 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:13,215 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:13,215 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:13,215 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T15:54:13,219 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T15:54:13,219 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=85bef17d9292,35415,1733500452229 in 215 msec 2024-12-06T15:54:13,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T15:54:13,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 634 msec 2024-12-06T15:54:13,225 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T15:54:13,225 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T15:54:13,226 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T15:54:13,226 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=85bef17d9292,35415,1733500452229, seqNum=-1] 2024-12-06T15:54:13,227 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T15:54:13,229 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45039, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T15:54:13,238 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 726 msec 2024-12-06T15:54:13,238 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733500453238, completionTime=-1 2024-12-06T15:54:13,238 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-06T15:54:13,238 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-06T15:54:13,240 INFO [master/85bef17d9292:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-06T15:54:13,240 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733500513240 2024-12-06T15:54:13,241 INFO [master/85bef17d9292:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733500573241 2024-12-06T15:54:13,241 INFO [master/85bef17d9292:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-06T15:54:13,241 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-06T15:54:13,241 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,36243,1733500452143-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:13,242 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,36243,1733500452143-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:13,242 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,36243,1733500452143-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:13,242 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-85bef17d9292:36243, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:13,242 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T15:54:13,242 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-06T15:54:13,244 DEBUG [master/85bef17d9292:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T15:54:13,247 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.940sec 2024-12-06T15:54:13,247 INFO [master/85bef17d9292:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T15:54:13,247 INFO [master/85bef17d9292:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T15:54:13,247 INFO [master/85bef17d9292:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T15:54:13,247 INFO [master/85bef17d9292:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T15:54:13,248 INFO [master/85bef17d9292:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T15:54:13,248 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,36243,1733500452143-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T15:54:13,248 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,36243,1733500452143-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T15:54:13,250 DEBUG [master/85bef17d9292:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T15:54:13,250 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T15:54:13,250 INFO [master/85bef17d9292:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=85bef17d9292,36243,1733500452143-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
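The records that follow show the Time-limited test thread building a client connection against the newly initialized mini cluster (cluster-id lookup, meta-region location fetch, master stub fetch). A minimal sketch of the equivalent client-side setup, assuming the ConnectionFactory API and an already-started HBaseTestingUtil instance; the names util, conn and admin are ours and are reused by the later sketches:

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: client handles against the already-running mini cluster; 'util' is the
// HBaseTestingUtil instance that started it (see the lifecycle sketch at the end).
Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
Admin admin = conn.getAdmin();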
2024-12-06T15:54:13,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ba23e7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T15:54:13,292 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 85bef17d9292,36243,-1 for getting cluster id 2024-12-06T15:54:13,292 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T15:54:13,294 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9e762204-2b1b-4d97-a393-7efb59553d9e' 2024-12-06T15:54:13,294 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T15:54:13,294 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9e762204-2b1b-4d97-a393-7efb59553d9e" 2024-12-06T15:54:13,295 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d14315c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T15:54:13,295 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [85bef17d9292,36243,-1] 2024-12-06T15:54:13,295 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T15:54:13,296 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:13,297 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49404, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-06T15:54:13,299 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26cedc1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T15:54:13,299 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T15:54:13,300 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=85bef17d9292,35415,1733500452229, seqNum=-1] 2024-12-06T15:54:13,301 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T15:54:13,303 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51480, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T15:54:13,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=85bef17d9292,36243,1733500452143 2024-12-06T15:54:13,307 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-06T15:54:13,309 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 85bef17d9292,36243,1733500452143 2024-12-06T15:54:13,309 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@39ce9f5 2024-12-06T15:54:13,309 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T15:54:13,311 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49416, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T15:54:13,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T15:54:13,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-06T15:54:13,317 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T15:54:13,317 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:13,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-06T15:54:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T15:54:13,319 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T15:54:13,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741837_1013 (size=392) 2024-12-06T15:54:13,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741837_1013 (size=392) 2024-12-06T15:54:13,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741837_1013 (size=392) 2024-12-06T15:54:13,334 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8b6eeeb1bceeb71a692e0d3c97cd6320, NAME => 'TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485 2024-12-06T15:54:13,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741838_1014 (size=51) 2024-12-06T15:54:13,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741838_1014 (size=51) 2024-12-06T15:54:13,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741838_1014 (size=51) 2024-12-06T15:54:13,352 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:13,352 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 8b6eeeb1bceeb71a692e0d3c97cd6320, disabling compactions & flushes 2024-12-06T15:54:13,352 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:13,352 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:13,352 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. after waiting 0 ms 2024-12-06T15:54:13,352 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:13,352 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:13,353 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8b6eeeb1bceeb71a692e0d3c97cd6320: Waiting for close lock at 1733500453352Disabling compacts and flushes for region at 1733500453352Disabling writes for close at 1733500453352Writing region close event to WAL at 1733500453352Closed at 1733500453352 2024-12-06T15:54:13,355 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T15:54:13,355 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733500453355"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733500453355"}]},"ts":"1733500453355"} 2024-12-06T15:54:13,359 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-06T15:54:13,360 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T15:54:13,361 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733500453361"}]},"ts":"1733500453361"} 2024-12-06T15:54:13,364 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-06T15:54:13,364 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {85bef17d9292=0} racks are {/default-rack=0} 2024-12-06T15:54:13,365 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-06T15:54:13,365 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-06T15:54:13,365 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-06T15:54:13,365 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-06T15:54:13,366 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-06T15:54:13,366 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-06T15:54:13,366 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-06T15:54:13,366 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-06T15:54:13,366 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-06T15:54:13,366 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-06T15:54:13,366 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8b6eeeb1bceeb71a692e0d3c97cd6320, ASSIGN}] 2024-12-06T15:54:13,369 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8b6eeeb1bceeb71a692e0d3c97cd6320, ASSIGN 2024-12-06T15:54:13,371 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8b6eeeb1bceeb71a692e0d3c97cd6320, ASSIGN; state=OFFLINE, location=85bef17d9292,35415,1733500452229; forceNewPlan=false, retain=false 2024-12-06T15:54:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T15:54:13,521 INFO [85bef17d9292:36243 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-06T15:54:13,522 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8b6eeeb1bceeb71a692e0d3c97cd6320, regionState=OPENING, regionLocation=85bef17d9292,35415,1733500452229 2024-12-06T15:54:13,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8b6eeeb1bceeb71a692e0d3c97cd6320, ASSIGN because future has completed 2024-12-06T15:54:13,532 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8b6eeeb1bceeb71a692e0d3c97cd6320, server=85bef17d9292,35415,1733500452229}] 2024-12-06T15:54:13,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T15:54:13,693 INFO [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:13,694 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8b6eeeb1bceeb71a692e0d3c97cd6320, NAME => 'TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320.', STARTKEY => '', ENDKEY => ''} 2024-12-06T15:54:13,694 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,694 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T15:54:13,694 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,694 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,696 INFO [StoreOpener-8b6eeeb1bceeb71a692e0d3c97cd6320-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,698 INFO [StoreOpener-8b6eeeb1bceeb71a692e0d3c97cd6320-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8b6eeeb1bceeb71a692e0d3c97cd6320 columnFamilyName cf 2024-12-06T15:54:13,698 DEBUG [StoreOpener-8b6eeeb1bceeb71a692e0d3c97cd6320-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T15:54:13,698 INFO [StoreOpener-8b6eeeb1bceeb71a692e0d3c97cd6320-1 {}] regionserver.HStore(327): Store=8b6eeeb1bceeb71a692e0d3c97cd6320/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T15:54:13,699 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,699 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/default/TestHBaseWalOnEC/8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,700 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/default/TestHBaseWalOnEC/8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,700 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,700 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,702 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,705 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/default/TestHBaseWalOnEC/8b6eeeb1bceeb71a692e0d3c97cd6320/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T15:54:13,706 INFO [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8b6eeeb1bceeb71a692e0d3c97cd6320; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64452858, jitterRate=-0.03957757353782654}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T15:54:13,706 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:13,708 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8b6eeeb1bceeb71a692e0d3c97cd6320: Running coprocessor pre-open hook at 1733500453695Writing region info on filesystem at 1733500453695Initializing all the Stores at 1733500453696 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733500453696Cleaning up temporary data from old regions at 1733500453700 (+4 ms)Running coprocessor post-open hooks at 1733500453706 (+6 ms)Region opened successfully at 1733500453708 (+2 ms) 2024-12-06T15:54:13,710 INFO [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320., pid=6, masterSystemTime=1733500453688 2024-12-06T15:54:13,714 DEBUG [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:13,714 INFO [RS_OPEN_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:13,715 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8b6eeeb1bceeb71a692e0d3c97cd6320, regionState=OPEN, openSeqNum=2, regionLocation=85bef17d9292,35415,1733500452229 2024-12-06T15:54:13,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8b6eeeb1bceeb71a692e0d3c97cd6320, server=85bef17d9292,35415,1733500452229 because future has completed 2024-12-06T15:54:13,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T15:54:13,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8b6eeeb1bceeb71a692e0d3c97cd6320, server=85bef17d9292,35415,1733500452229 in 190 msec 2024-12-06T15:54:13,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T15:54:13,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8b6eeeb1bceeb71a692e0d3c97cd6320, ASSIGN in 361 msec 2024-12-06T15:54:13,733 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T15:54:13,733 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733500453733"}]},"ts":"1733500453733"} 2024-12-06T15:54:13,736 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-06T15:54:13,738 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T15:54:13,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 426 msec 
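The CreateTableProcedure that just finished (pid=4) was driven by the client request logged earlier ("create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', VERSIONS => '1', ...}"). A minimal sketch of an Admin-API call that issues such a request, assuming the standard TableDescriptorBuilder/ColumnFamilyDescriptorBuilder client API and the admin handle from the earlier sketch; only the attributes that differ from the defaults are set explicitly:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: create 'TestHBaseWalOnEC' with REGION_REPLICATION=1 and one family 'cf'
// keeping a single version, as in the schema logged by HMaster above.
admin.createTable(
    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
        .setRegionReplication(1)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .build())
        .build());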
2024-12-06T15:54:13,757 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T15:54:13,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T15:54:13,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T15:54:13,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T15:54:13,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T15:54:13,950 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-06T15:54:13,950 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-06T15:54:13,950 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T15:54:13,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-06T15:54:13,954 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T15:54:13,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
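With the table assigned, the test writes a single cell and asks for a flush; the records that follow show the location lookup for row 'row', the FlushTableProcedure (pid=7), and the region flush that persists the cell logged as "row/cf:cq". A minimal sketch of the corresponding client calls, reusing the util, conn and admin handles assumed earlier; the cell value is a placeholder, since the log only records the key:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: wait for assignment, write one cell, then flush the table.
TableName tn = TableName.valueOf("TestHBaseWalOnEC");
util.waitUntilAllRegionsAssigned(tn, 60_000);    // "Waiting until all regions ... get assigned"
try (Table table = conn.getTable(tn)) {
  table.put(new Put(Bytes.toBytes("row"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
}
admin.flush(tn);                                 // triggers FlushTableProcedure pid=7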
2024-12-06T15:54:13,959 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320., hostname=85bef17d9292,35415,1733500452229, seqNum=2] 2024-12-06T15:54:13,963 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-06T15:54:13,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-06T15:54:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T15:54:13,967 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-06T15:54:13,968 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T15:54:13,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T15:54:14,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T15:54:14,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35415 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-06T15:54:14,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 
2024-12-06T15:54:14,124 INFO [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 8b6eeeb1bceeb71a692e0d3c97cd6320 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-06T15:54:14,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/default/TestHBaseWalOnEC/8b6eeeb1bceeb71a692e0d3c97cd6320/.tmp/cf/00238ec16ecd4c54ae9221beac39150e is 36, key is row/cf:cq/1733500453961/Put/seqid=0 2024-12-06T15:54:14,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741839_1015 (size=4787) 2024-12-06T15:54:14,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741839_1015 (size=4787) 2024-12-06T15:54:14,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741839_1015 (size=4787) 2024-12-06T15:54:14,153 INFO [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/default/TestHBaseWalOnEC/8b6eeeb1bceeb71a692e0d3c97cd6320/.tmp/cf/00238ec16ecd4c54ae9221beac39150e 2024-12-06T15:54:14,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/default/TestHBaseWalOnEC/8b6eeeb1bceeb71a692e0d3c97cd6320/.tmp/cf/00238ec16ecd4c54ae9221beac39150e as hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/default/TestHBaseWalOnEC/8b6eeeb1bceeb71a692e0d3c97cd6320/cf/00238ec16ecd4c54ae9221beac39150e 2024-12-06T15:54:14,172 INFO [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/default/TestHBaseWalOnEC/8b6eeeb1bceeb71a692e0d3c97cd6320/cf/00238ec16ecd4c54ae9221beac39150e, entries=1, sequenceid=5, filesize=4.7 K 2024-12-06T15:54:14,174 INFO [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 8b6eeeb1bceeb71a692e0d3c97cd6320 in 49ms, sequenceid=5, compaction requested=false 2024-12-06T15:54:14,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 8b6eeeb1bceeb71a692e0d3c97cd6320: 2024-12-06T15:54:14,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 
2024-12-06T15:54:14,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/85bef17d9292:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-06T15:54:14,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-06T15:54:14,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-06T15:54:14,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 208 msec 2024-12-06T15:54:14,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 219 msec 2024-12-06T15:54:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36243 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T15:54:14,289 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-06T15:54:14,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T15:54:14,294 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T15:54:14,294 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:14,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:14,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:14,294 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-06T15:54:14,295 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T15:54:14,295 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1819738662, stopped=false 2024-12-06T15:54:14,295 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=85bef17d9292,36243,1733500452143 2024-12-06T15:54:14,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:14,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:14,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:14,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T15:54:14,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:14,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, 
quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:14,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:14,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:14,298 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T15:54:14,298 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T15:54:14,298 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:14,298 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:14,298 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:14,298 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:14,299 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:14,299 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T15:54:14,299 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '85bef17d9292,44761,1733500452199' ***** 2024-12-06T15:54:14,299 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T15:54:14,299 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '85bef17d9292,35415,1733500452229' ***** 2024-12-06T15:54:14,299 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T15:54:14,299 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '85bef17d9292,33825,1733500452271' ***** 2024-12-06T15:54:14,300 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T15:54:14,300 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T15:54:14,300 INFO [RS:1;85bef17d9292:35415 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T15:54:14,300 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T15:54:14,300 INFO [RS:2;85bef17d9292:33825 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T15:54:14,300 INFO [RS:1;85bef17d9292:35415 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-06T15:54:14,300 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T15:54:14,300 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(3091): Received CLOSE for 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:14,300 INFO [RS:2;85bef17d9292:33825 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T15:54:14,300 INFO [RS:2;85bef17d9292:33825 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T15:54:14,300 INFO [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(959): stopping server 85bef17d9292,33825,1733500452271 2024-12-06T15:54:14,300 INFO [RS:2;85bef17d9292:33825 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T15:54:14,300 INFO [RS:0;85bef17d9292:44761 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T15:54:14,300 INFO [RS:2;85bef17d9292:33825 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;85bef17d9292:33825. 2024-12-06T15:54:14,300 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(959): stopping server 85bef17d9292,35415,1733500452229 2024-12-06T15:54:14,300 INFO [RS:0;85bef17d9292:44761 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T15:54:14,301 INFO [RS:1;85bef17d9292:35415 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T15:54:14,301 INFO [RS:0;85bef17d9292:44761 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T15:54:14,301 DEBUG [RS:2;85bef17d9292:33825 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:14,301 INFO [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(959): stopping server 85bef17d9292,44761,1733500452199 2024-12-06T15:54:14,301 INFO [RS:1;85bef17d9292:35415 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;85bef17d9292:35415. 
2024-12-06T15:54:14,301 DEBUG [RS:2;85bef17d9292:33825 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:14,301 INFO [RS:0;85bef17d9292:44761 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T15:54:14,301 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8b6eeeb1bceeb71a692e0d3c97cd6320, disabling compactions & flushes 2024-12-06T15:54:14,301 INFO [RS:0;85bef17d9292:44761 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;85bef17d9292:44761. 2024-12-06T15:54:14,301 DEBUG [RS:1;85bef17d9292:35415 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:14,301 INFO [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(976): stopping server 85bef17d9292,33825,1733500452271; all regions closed. 2024-12-06T15:54:14,301 INFO [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:14,301 DEBUG [RS:1;85bef17d9292:35415 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:14,301 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 
2024-12-06T15:54:14,301 DEBUG [RS:0;85bef17d9292:44761 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T15:54:14,301 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. after waiting 0 ms 2024-12-06T15:54:14,301 DEBUG [RS:0;85bef17d9292:44761 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:14,301 INFO [RS:1;85bef17d9292:35415 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T15:54:14,301 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:14,301 INFO [RS:1;85bef17d9292:35415 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T15:54:14,301 INFO [RS:1;85bef17d9292:35415 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T15:54:14,301 INFO [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(976): stopping server 85bef17d9292,44761,1733500452199; all regions closed. 
2024-12-06T15:54:14,301 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T15:54:14,301 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T15:54:14,302 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,302 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,302 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,302 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,302 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,302 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,302 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,302 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,303 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,303 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,305 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-06T15:54:14,305 DEBUG [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8b6eeeb1bceeb71a692e0d3c97cd6320=TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320.} 2024-12-06T15:54:14,305 DEBUG [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8b6eeeb1bceeb71a692e0d3c97cd6320 2024-12-06T15:54:14,305 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T15:54:14,305 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T15:54:14,305 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T15:54:14,305 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T15:54:14,305 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T15:54:14,305 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-06T15:54:14,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741834_1010 (size=93) 2024-12-06T15:54:14,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741835_1011 (size=93) 2024-12-06T15:54:14,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741835_1011 (size=93) 2024-12-06T15:54:14,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741834_1010 (size=93) 2024-12-06T15:54:14,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is 
added to blk_1073741834_1010 (size=93) 2024-12-06T15:54:14,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741835_1011 (size=93) 2024-12-06T15:54:14,313 DEBUG [RS:2;85bef17d9292:33825 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/oldWALs 2024-12-06T15:54:14,313 INFO [RS:2;85bef17d9292:33825 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 85bef17d9292%2C33825%2C1733500452271:(num 1733500452806) 2024-12-06T15:54:14,313 DEBUG [RS:2;85bef17d9292:33825 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:14,313 INFO [RS:2;85bef17d9292:33825 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:14,313 INFO [RS:2;85bef17d9292:33825 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T15:54:14,313 INFO [RS:2;85bef17d9292:33825 {}] hbase.ChoreService(370): Chore service for: regionserver/85bef17d9292:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T15:54:14,314 INFO [RS:2;85bef17d9292:33825 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T15:54:14,314 INFO [RS:2;85bef17d9292:33825 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T15:54:14,314 INFO [RS:2;85bef17d9292:33825 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T15:54:14,314 INFO [RS:2;85bef17d9292:33825 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T15:54:14,314 INFO [RS:2;85bef17d9292:33825 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33825 2024-12-06T15:54:14,314 DEBUG [RS:0;85bef17d9292:44761 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/oldWALs 2024-12-06T15:54:14,314 INFO [RS:0;85bef17d9292:44761 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 85bef17d9292%2C44761%2C1733500452199:(num 1733500452807) 2024-12-06T15:54:14,314 DEBUG [RS:0;85bef17d9292:44761 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:14,315 INFO [regionserver/85bef17d9292:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T15:54:14,315 INFO [RS:0;85bef17d9292:44761 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:14,315 INFO [RS:0;85bef17d9292:44761 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T15:54:14,316 INFO [RS:0;85bef17d9292:44761 {}] hbase.ChoreService(370): Chore service for: regionserver/85bef17d9292:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T15:54:14,316 INFO [RS:0;85bef17d9292:44761 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-06T15:54:14,316 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/default/TestHBaseWalOnEC/8b6eeeb1bceeb71a692e0d3c97cd6320/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-06T15:54:14,316 INFO [RS:0;85bef17d9292:44761 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T15:54:14,316 INFO [RS:0;85bef17d9292:44761 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T15:54:14,316 INFO [RS:0;85bef17d9292:44761 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T15:54:14,316 INFO [RS:0;85bef17d9292:44761 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44761 2024-12-06T15:54:14,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T15:54:14,317 INFO [regionserver/85bef17d9292:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T15:54:14,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/85bef17d9292,33825,1733500452271 2024-12-06T15:54:14,317 INFO [RS:2;85bef17d9292:33825 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T15:54:14,318 INFO [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 2024-12-06T15:54:14,319 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8b6eeeb1bceeb71a692e0d3c97cd6320: Waiting for close lock at 1733500454301Running coprocessor pre-close hooks at 1733500454301Disabling compacts and flushes for region at 1733500454301Disabling writes for close at 1733500454301Writing region close event to WAL at 1733500454305 (+4 ms)Running coprocessor post-close hooks at 1733500454318 (+13 ms)Closed at 1733500454318 2024-12-06T15:54:14,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/85bef17d9292,44761,1733500452199 2024-12-06T15:54:14,319 DEBUG [RS_CLOSE_REGION-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320. 
2024-12-06T15:54:14,319 INFO [RS:0;85bef17d9292:44761 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T15:54:14,320 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [85bef17d9292,44761,1733500452199] 2024-12-06T15:54:14,322 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/85bef17d9292,44761,1733500452199 already deleted, retry=false 2024-12-06T15:54:14,322 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 85bef17d9292,44761,1733500452199 expired; onlineServers=2 2024-12-06T15:54:14,322 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [85bef17d9292,33825,1733500452271] 2024-12-06T15:54:14,323 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/85bef17d9292,33825,1733500452271 already deleted, retry=false 2024-12-06T15:54:14,324 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 85bef17d9292,33825,1733500452271 expired; onlineServers=1 2024-12-06T15:54:14,332 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/.tmp/info/fb5b67cfe6864153b2fb746f2f464fe5 is 153, key is TestHBaseWalOnEC,,1733500453312.8b6eeeb1bceeb71a692e0d3c97cd6320./info:regioninfo/1733500453715/Put/seqid=0 2024-12-06T15:54:14,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741840_1016 (size=6637) 2024-12-06T15:54:14,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741840_1016 (size=6637) 2024-12-06T15:54:14,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741840_1016 (size=6637) 2024-12-06T15:54:14,340 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/.tmp/info/fb5b67cfe6864153b2fb746f2f464fe5 2024-12-06T15:54:14,353 INFO [regionserver/85bef17d9292:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:14,358 INFO [regionserver/85bef17d9292:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:14,364 INFO [regionserver/85bef17d9292:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:14,365 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/.tmp/ns/c6fe5d7754ed4bbd9b58955d50f7fa3d is 43, key is default/ns:d/1733500453229/Put/seqid=0 2024-12-06T15:54:14,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741841_1017 (size=5153) 2024-12-06T15:54:14,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741841_1017 
(size=5153) 2024-12-06T15:54:14,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741841_1017 (size=5153) 2024-12-06T15:54:14,378 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/.tmp/ns/c6fe5d7754ed4bbd9b58955d50f7fa3d 2024-12-06T15:54:14,411 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/.tmp/table/ae3d091358dd43b899dd20ab7e8b8eae is 52, key is TestHBaseWalOnEC/table:state/1733500453733/Put/seqid=0 2024-12-06T15:54:14,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741842_1018 (size=5249) 2024-12-06T15:54:14,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741842_1018 (size=5249) 2024-12-06T15:54:14,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741842_1018 (size=5249) 2024-12-06T15:54:14,420 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/.tmp/table/ae3d091358dd43b899dd20ab7e8b8eae 2024-12-06T15:54:14,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:14,420 INFO [RS:2;85bef17d9292:33825 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T15:54:14,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33825-0x100680975fe0003, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:14,421 INFO [RS:2;85bef17d9292:33825 {}] regionserver.HRegionServer(1031): Exiting; stopping=85bef17d9292,33825,1733500452271; zookeeper connection closed. 2024-12-06T15:54:14,421 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@49d54469 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@49d54469 2024-12-06T15:54:14,422 INFO [RS:0;85bef17d9292:44761 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T15:54:14,422 INFO [RS:0;85bef17d9292:44761 {}] regionserver.HRegionServer(1031): Exiting; stopping=85bef17d9292,44761,1733500452199; zookeeper connection closed. 
2024-12-06T15:54:14,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:14,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44761-0x100680975fe0001, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:14,422 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@36172809 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@36172809 2024-12-06T15:54:14,428 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/.tmp/info/fb5b67cfe6864153b2fb746f2f464fe5 as hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/info/fb5b67cfe6864153b2fb746f2f464fe5 2024-12-06T15:54:14,436 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/info/fb5b67cfe6864153b2fb746f2f464fe5, entries=10, sequenceid=11, filesize=6.5 K 2024-12-06T15:54:14,438 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/.tmp/ns/c6fe5d7754ed4bbd9b58955d50f7fa3d as hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/ns/c6fe5d7754ed4bbd9b58955d50f7fa3d 2024-12-06T15:54:14,447 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/ns/c6fe5d7754ed4bbd9b58955d50f7fa3d, entries=2, sequenceid=11, filesize=5.0 K 2024-12-06T15:54:14,449 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/.tmp/table/ae3d091358dd43b899dd20ab7e8b8eae as hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/table/ae3d091358dd43b899dd20ab7e8b8eae 2024-12-06T15:54:14,457 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/table/ae3d091358dd43b899dd20ab7e8b8eae, entries=2, sequenceid=11, filesize=5.1 K 2024-12-06T15:54:14,459 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false 2024-12-06T15:54:14,465 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-06T15:54:14,465 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T15:54:14,465 INFO [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T15:54:14,466 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733500454305Running coprocessor pre-close hooks at 1733500454305Disabling compacts and flushes for region at 1733500454305Disabling writes for close at 1733500454305Obtaining lock to block concurrent updates at 1733500454306 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733500454306Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733500454306Flushing stores of hbase:meta,,1.1588230740 at 1733500454307 (+1 ms)Flushing 1588230740/info: creating writer at 1733500454308 (+1 ms)Flushing 1588230740/info: appending metadata at 1733500454331 (+23 ms)Flushing 1588230740/info: closing flushed file at 1733500454331Flushing 1588230740/ns: creating writer at 1733500454348 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733500454365 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733500454365Flushing 1588230740/table: creating writer at 1733500454387 (+22 ms)Flushing 1588230740/table: appending metadata at 1733500454410 (+23 ms)Flushing 1588230740/table: closing flushed file at 1733500454410Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48bbba3a: reopening flushed file at 1733500454427 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45caf0f7: reopening flushed file at 1733500454436 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c676254: reopening flushed file at 1733500454448 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false at 1733500454459 (+11 ms)Writing region close event to WAL at 1733500454460 (+1 ms)Running coprocessor post-close hooks at 1733500454465 (+5 ms)Closed at 1733500454465 2024-12-06T15:54:14,466 DEBUG [RS_CLOSE_META-regionserver/85bef17d9292:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T15:54:14,505 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(976): stopping server 85bef17d9292,35415,1733500452229; all regions closed. 
2024-12-06T15:54:14,506 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,506 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,506 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,506 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,506 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741836_1012 (size=2751) 2024-12-06T15:54:14,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741836_1012 (size=2751) 2024-12-06T15:54:14,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741836_1012 (size=2751) 2024-12-06T15:54:14,513 DEBUG [RS:1;85bef17d9292:35415 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/oldWALs 2024-12-06T15:54:14,514 INFO [RS:1;85bef17d9292:35415 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 85bef17d9292%2C35415%2C1733500452229.meta:.meta(num 1733500453165) 2024-12-06T15:54:14,514 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,514 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,514 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,514 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,515 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741833_1009 (size=1298) 2024-12-06T15:54:14,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741833_1009 (size=1298) 2024-12-06T15:54:14,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741833_1009 (size=1298) 2024-12-06T15:54:14,521 DEBUG [RS:1;85bef17d9292:35415 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/oldWALs 2024-12-06T15:54:14,521 INFO [RS:1;85bef17d9292:35415 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 85bef17d9292%2C35415%2C1733500452229:(num 1733500452803) 2024-12-06T15:54:14,521 DEBUG [RS:1;85bef17d9292:35415 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T15:54:14,521 INFO [RS:1;85bef17d9292:35415 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T15:54:14,522 INFO [RS:1;85bef17d9292:35415 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T15:54:14,522 INFO [RS:1;85bef17d9292:35415 {}] hbase.ChoreService(370): Chore service for: regionserver/85bef17d9292:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T15:54:14,522 INFO [RS:1;85bef17d9292:35415 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T15:54:14,522 INFO [regionserver/85bef17d9292:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T15:54:14,522 INFO [RS:1;85bef17d9292:35415 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35415 2024-12-06T15:54:14,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T15:54:14,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/85bef17d9292,35415,1733500452229 2024-12-06T15:54:14,524 INFO [RS:1;85bef17d9292:35415 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T15:54:14,526 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [85bef17d9292,35415,1733500452229] 2024-12-06T15:54:14,527 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/85bef17d9292,35415,1733500452229 already deleted, retry=false 2024-12-06T15:54:14,528 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 85bef17d9292,35415,1733500452229 expired; onlineServers=0 2024-12-06T15:54:14,528 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '85bef17d9292,36243,1733500452143' ***** 2024-12-06T15:54:14,528 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T15:54:14,528 INFO [M:0;85bef17d9292:36243 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T15:54:14,528 INFO [M:0;85bef17d9292:36243 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T15:54:14,528 DEBUG [M:0;85bef17d9292:36243 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T15:54:14,528 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T15:54:14,528 DEBUG [M:0;85bef17d9292:36243 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T15:54:14,528 DEBUG [master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.small.0-1733500452525 {}] cleaner.HFileCleaner(306): Exit Thread[master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.small.0-1733500452525,5,FailOnTimeoutGroup] 2024-12-06T15:54:14,528 DEBUG [master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.large.0-1733500452525 {}] cleaner.HFileCleaner(306): Exit Thread[master/85bef17d9292:0:becomeActiveMaster-HFileCleaner.large.0-1733500452525,5,FailOnTimeoutGroup] 2024-12-06T15:54:14,528 INFO [M:0;85bef17d9292:36243 {}] hbase.ChoreService(370): Chore service for: master/85bef17d9292:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T15:54:14,528 INFO [M:0;85bef17d9292:36243 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T15:54:14,529 DEBUG [M:0;85bef17d9292:36243 {}] master.HMaster(1795): Stopping service threads 2024-12-06T15:54:14,529 INFO [M:0;85bef17d9292:36243 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T15:54:14,529 INFO [M:0;85bef17d9292:36243 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T15:54:14,529 INFO [M:0;85bef17d9292:36243 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T15:54:14,529 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T15:54:14,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T15:54:14,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T15:54:14,530 DEBUG [M:0;85bef17d9292:36243 {}] zookeeper.ZKUtil(347): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T15:54:14,530 WARN [M:0;85bef17d9292:36243 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T15:54:14,531 INFO [M:0;85bef17d9292:36243 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/.lastflushedseqids 2024-12-06T15:54:14,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741843_1019 (size=127) 2024-12-06T15:54:14,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741843_1019 (size=127) 2024-12-06T15:54:14,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741843_1019 (size=127) 2024-12-06T15:54:14,540 INFO [M:0;85bef17d9292:36243 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T15:54:14,540 INFO [M:0;85bef17d9292:36243 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T15:54:14,541 DEBUG [M:0;85bef17d9292:36243 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T15:54:14,541 INFO [M:0;85bef17d9292:36243 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:14,541 DEBUG [M:0;85bef17d9292:36243 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:14,541 DEBUG [M:0;85bef17d9292:36243 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T15:54:14,541 DEBUG [M:0;85bef17d9292:36243 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:14,541 INFO [M:0;85bef17d9292:36243 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-06T15:54:14,568 DEBUG [M:0;85bef17d9292:36243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7844239477eb47acbd5b4c22c8d5fc14 is 82, key is hbase:meta,,1/info:regioninfo/1733500453211/Put/seqid=0 2024-12-06T15:54:14,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741844_1020 (size=5672) 2024-12-06T15:54:14,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741844_1020 (size=5672) 2024-12-06T15:54:14,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741844_1020 (size=5672) 2024-12-06T15:54:14,584 INFO [M:0;85bef17d9292:36243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7844239477eb47acbd5b4c22c8d5fc14 2024-12-06T15:54:14,635 INFO [RS:1;85bef17d9292:35415 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T15:54:14,635 INFO [RS:1;85bef17d9292:35415 {}] regionserver.HRegionServer(1031): Exiting; stopping=85bef17d9292,35415,1733500452229; zookeeper connection closed. 
2024-12-06T15:54:14,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:14,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35415-0x100680975fe0002, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:14,640 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e8cff37 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e8cff37 2024-12-06T15:54:14,640 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-06T15:54:14,651 DEBUG [M:0;85bef17d9292:36243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4ad3c67576664aa5b12b741c0d793c2c is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733500453739/Put/seqid=0 2024-12-06T15:54:14,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741845_1021 (size=6438) 2024-12-06T15:54:14,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741845_1021 (size=6438) 2024-12-06T15:54:14,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741845_1021 (size=6438) 2024-12-06T15:54:14,660 INFO [M:0;85bef17d9292:36243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4ad3c67576664aa5b12b741c0d793c2c 2024-12-06T15:54:14,682 DEBUG [M:0;85bef17d9292:36243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9996700c8f4c47a99fe44719955bd04b is 69, key is 85bef17d9292,33825,1733500452271/rs:state/1733500452624/Put/seqid=0 2024-12-06T15:54:14,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741846_1022 (size=5294) 2024-12-06T15:54:14,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741846_1022 (size=5294) 2024-12-06T15:54:14,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741846_1022 (size=5294) 2024-12-06T15:54:14,690 INFO [M:0;85bef17d9292:36243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9996700c8f4c47a99fe44719955bd04b 2024-12-06T15:54:14,697 DEBUG [M:0;85bef17d9292:36243 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7844239477eb47acbd5b4c22c8d5fc14 as hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7844239477eb47acbd5b4c22c8d5fc14 2024-12-06T15:54:14,703 INFO [M:0;85bef17d9292:36243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7844239477eb47acbd5b4c22c8d5fc14, entries=8, sequenceid=72, filesize=5.5 K 2024-12-06T15:54:14,704 DEBUG [M:0;85bef17d9292:36243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4ad3c67576664aa5b12b741c0d793c2c as hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4ad3c67576664aa5b12b741c0d793c2c 2024-12-06T15:54:14,710 INFO [M:0;85bef17d9292:36243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4ad3c67576664aa5b12b741c0d793c2c, entries=8, sequenceid=72, filesize=6.3 K 2024-12-06T15:54:14,711 DEBUG [M:0;85bef17d9292:36243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9996700c8f4c47a99fe44719955bd04b as hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9996700c8f4c47a99fe44719955bd04b 2024-12-06T15:54:14,717 INFO [M:0;85bef17d9292:36243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40973/user/jenkins/test-data/c49ae734-b8f7-c4e8-f4e9-773de06c6485/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9996700c8f4c47a99fe44719955bd04b, entries=3, sequenceid=72, filesize=5.2 K 2024-12-06T15:54:14,719 INFO [M:0;85bef17d9292:36243 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 178ms, sequenceid=72, compaction requested=false 2024-12-06T15:54:14,721 INFO [M:0;85bef17d9292:36243 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T15:54:14,721 DEBUG [M:0;85bef17d9292:36243 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733500454541Disabling compacts and flushes for region at 1733500454541Disabling writes for close at 1733500454541Obtaining lock to block concurrent updates at 1733500454541Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733500454541Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733500454542 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733500454543 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733500454543Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733500454567 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733500454567Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733500454592 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733500454650 (+58 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733500454650Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733500454667 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733500454681 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733500454682 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49b75c27: reopening flushed file at 1733500454696 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65793d11: reopening flushed file at 1733500454703 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13b42931: reopening flushed file at 1733500454710 (+7 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 178ms, sequenceid=72, compaction requested=false at 1733500454719 (+9 ms)Writing region close event to WAL at 1733500454721 (+2 ms)Closed at 1733500454721 2024-12-06T15:54:14,722 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,722 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,722 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,722 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,722 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T15:54:14,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34481 is added to blk_1073741830_1006 (size=32662) 2024-12-06T15:54:14,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33193 is added to blk_1073741830_1006 (size=32662) 2024-12-06T15:54:14,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42625 is added to blk_1073741830_1006 (size=32662) 2024-12-06T15:54:14,726 INFO [M:0;85bef17d9292:36243 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-06T15:54:14,726 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T15:54:14,726 INFO [M:0;85bef17d9292:36243 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36243 2024-12-06T15:54:14,726 INFO [M:0;85bef17d9292:36243 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T15:54:14,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:14,828 INFO [M:0;85bef17d9292:36243 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T15:54:14,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36243-0x100680975fe0000, quorum=127.0.0.1:54026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T15:54:14,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25c162fe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:14,831 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1da1746e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T15:54:14,831 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T15:54:14,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@739551bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T15:54:14,832 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7adc0795{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.log.dir/,STOPPED} 2024-12-06T15:54:14,833 WARN [BP-1078682521-172.17.0.2-1733500451221 heartbeating to localhost/127.0.0.1:40973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T15:54:14,833 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T15:54:14,833 WARN [BP-1078682521-172.17.0.2-1733500451221 heartbeating to localhost/127.0.0.1:40973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1078682521-172.17.0.2-1733500451221 (Datanode Uuid dea8f4c0-72bb-4080-a1e3-3f65858921dd) service to localhost/127.0.0.1:40973 2024-12-06T15:54:14,833 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T15:54:14,834 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data5/current/BP-1078682521-172.17.0.2-1733500451221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:14,834 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data6/current/BP-1078682521-172.17.0.2-1733500451221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:14,835 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T15:54:14,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@609afea4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:14,839 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@527d4f04{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T15:54:14,839 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T15:54:14,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f8ad177{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T15:54:14,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69cb0b1f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.log.dir/,STOPPED} 2024-12-06T15:54:14,840 WARN [BP-1078682521-172.17.0.2-1733500451221 heartbeating to localhost/127.0.0.1:40973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T15:54:14,840 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T15:54:14,840 WARN [BP-1078682521-172.17.0.2-1733500451221 heartbeating to localhost/127.0.0.1:40973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1078682521-172.17.0.2-1733500451221 (Datanode Uuid b5652060-87ee-486d-8b83-0eccf5c09641) service to localhost/127.0.0.1:40973 2024-12-06T15:54:14,840 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T15:54:14,841 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data3/current/BP-1078682521-172.17.0.2-1733500451221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:14,841 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data4/current/BP-1078682521-172.17.0.2-1733500451221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:14,841 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T15:54:14,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14abb266{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T15:54:14,845 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@348ccaab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T15:54:14,845 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T15:54:14,846 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c2dd4e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T15:54:14,846 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a6744cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.log.dir/,STOPPED} 2024-12-06T15:54:14,847 WARN [BP-1078682521-172.17.0.2-1733500451221 heartbeating to localhost/127.0.0.1:40973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T15:54:14,847 WARN [BP-1078682521-172.17.0.2-1733500451221 heartbeating to localhost/127.0.0.1:40973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1078682521-172.17.0.2-1733500451221 (Datanode Uuid d5c72180-5c32-4d40-8f69-0c3d6e7df1e1) service to localhost/127.0.0.1:40973 2024-12-06T15:54:14,847 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T15:54:14,847 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T15:54:14,848 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data1/current/BP-1078682521-172.17.0.2-1733500451221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:14,848 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/cluster_18913181-b46e-8b6f-2867-c0670697e455/data/data2/current/BP-1078682521-172.17.0.2-1733500451221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T15:54:14,848 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T15:54:14,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b25f894{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T15:54:14,855 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@53797dc3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T15:54:14,855 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T15:54:14,856 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@715f09c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T15:54:14,856 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49b2b984{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2591f1cf-9a83-d7dc-e801-2e6fb157d6a5/hadoop.log.dir/,STOPPED} 2024-12-06T15:54:14,866 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T15:54:14,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T15:54:14,897 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=148 (was 88) - Thread LEAK? -, OpenFileDescriptor=518 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=269 (was 249) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=9074 (was 9297)