2024-12-05 12:31:02,698 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-05 12:31:02,710 main DEBUG Took 0.010280 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-05 12:31:02,710 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-05 12:31:02,710 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-05 12:31:02,711 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-05 12:31:02,713 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,722 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-05 12:31:02,742 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,744 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,744 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,745 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,746 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,746 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,747 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,747 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,748 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,749 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,750 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,750 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,751 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,751 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,752 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,753 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,753 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,754 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,754 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,755 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,755 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,756 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,756 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,757 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:31:02,757 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,758 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-05 12:31:02,760 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:31:02,761 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-05 12:31:02,764 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-05 12:31:02,765 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-05 12:31:02,766 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-05 12:31:02,767 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-05 12:31:02,777 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-05 12:31:02,780 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-05 12:31:02,782 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-05 12:31:02,782 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-05 12:31:02,783 main DEBUG createAppenders(={Console})
2024-12-05 12:31:02,784 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-12-05 12:31:02,784 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-05 12:31:02,784 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-12-05 12:31:02,785 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-05 12:31:02,786 main DEBUG OutputStream closed
2024-12-05 12:31:02,786 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-05 12:31:02,786 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-05 12:31:02,787 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-12-05 12:31:02,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-05 12:31:02,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-05 12:31:02,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-05 12:31:02,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-05 12:31:02,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-05 12:31:02,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-05 12:31:02,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-05 12:31:02,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-05 12:31:02,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-05 12:31:02,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-05 12:31:02,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-05 12:31:02,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-05 12:31:02,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-05 12:31:02,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-05 12:31:02,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-05 12:31:02,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-05 12:31:02,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-05 12:31:02,875 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-05 12:31:02,878 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-05 12:31:02,878 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-12-05 12:31:02,878 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-05 12:31:02,879 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-12-05T12:31:02,895 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-12-05 12:31:02,898 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-05 12:31:02,898 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-05T12:31:03,193 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115
2024-12-05T12:31:03,224 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334, deleteOnExit=true
2024-12-05T12:31:03,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/test.cache.data in system properties and HBase conf
2024-12-05T12:31:03,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.tmp.dir in system properties and HBase conf
2024-12-05T12:31:03,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir in system properties and HBase conf
2024-12-05T12:31:03,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-05T12:31:03,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-05T12:31:03,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-05T12:31:03,338 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-05T12:31:03,459 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-05T12:31:03,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-05T12:31:03,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-05T12:31:03,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-05T12:31:03,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T12:31:03,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-05T12:31:03,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-05T12:31:03,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T12:31:03,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T12:31:03,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-05T12:31:03,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/nfs.dump.dir in system properties and HBase conf
2024-12-05T12:31:03,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/java.io.tmpdir in system properties and HBase conf
2024-12-05T12:31:03,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T12:31:03,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-05T12:31:03,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-05T12:31:04,320 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-05T12:31:04,401 INFO [Time-limited test {}] log.Log(170): Logging initialized @2395ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-05T12:31:04,481 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:04,555 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:31:04,575 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:31:04,575 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:31:04,576 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-05T12:31:04,589 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:04,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:31:04,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:31:04,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/java.io.tmpdir/jetty-localhost-43961-hadoop-hdfs-3_4_1-tests_jar-_-any-6733231581410235675/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T12:31:04,828 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:43961}
2024-12-05T12:31:04,829 INFO [Time-limited test {}] server.Server(415): Started @2824ms
2024-12-05T12:31:05,228 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:05,237 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:31:05,238 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:31:05,238 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:31:05,239 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-05T12:31:05,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:31:05,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:31:05,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/java.io.tmpdir/jetty-localhost-43035-hadoop-hdfs-3_4_1-tests_jar-_-any-3039481039074802740/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:31:05,373 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:43035}
2024-12-05T12:31:05,373 INFO [Time-limited test {}] server.Server(415): Started @3369ms
2024-12-05T12:31:05,429 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T12:31:05,556 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:05,562 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:31:05,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:31:05,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:31:05,565 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T12:31:05,568 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:31:05,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:31:05,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/java.io.tmpdir/jetty-localhost-41937-hadoop-hdfs-3_4_1-tests_jar-_-any-5521007383057406055/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:31:05,692 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:41937}
2024-12-05T12:31:05,692 INFO [Time-limited test {}] server.Server(415): Started @3688ms
2024-12-05T12:31:05,695 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T12:31:05,740 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:05,746 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:31:05,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:31:05,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:31:05,750 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T12:31:05,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:31:05,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:31:05,886 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data1/current/BP-743128014-172.17.0.2-1733401864075/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:05,886 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data4/current/BP-743128014-172.17.0.2-1733401864075/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:05,887 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data3/current/BP-743128014-172.17.0.2-1733401864075/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:05,889 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data2/current/BP-743128014-172.17.0.2-1733401864075/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:05,907 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/java.io.tmpdir/jetty-localhost-37247-hadoop-hdfs-3_4_1-tests_jar-_-any-9262717552927377376/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:31:05,907 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:37247}
2024-12-05T12:31:05,908 INFO [Time-limited test {}] server.Server(415): Started @3903ms 2024-12-05T12:31:05,911 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T12:31:05,937 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T12:31:05,944 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T12:31:06,012 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f65c942270a684d with lease ID 0xcb3ee9028cc0d99f: Processing first storage report for DS-8c31d81e-5946-42b1-80ec-7b705af31906 from datanode DatanodeRegistration(127.0.0.1:33847, datanodeUuid=a86eef30-d633-403a-91d3-5d57c0f5ef07, infoPort=37957, infoSecurePort=0, ipcPort=44851, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075) 2024-12-05T12:31:06,013 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f65c942270a684d with lease ID 0xcb3ee9028cc0d99f: from storage DS-8c31d81e-5946-42b1-80ec-7b705af31906 node DatanodeRegistration(127.0.0.1:33847, datanodeUuid=a86eef30-d633-403a-91d3-5d57c0f5ef07, infoPort=37957, infoSecurePort=0, ipcPort=44851, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-05T12:31:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25ccc37f844e65a9 with lease ID 0xcb3ee9028cc0d9a0: Processing first storage report for DS-0fdc3f9e-ae59-40fb-9c6c-6d47d898064c from datanode DatanodeRegistration(127.0.0.1:36151, datanodeUuid=ad7e54f4-e677-4555-844c-f46253668bca, infoPort=41727, infoSecurePort=0, ipcPort=40031, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075) 2024-12-05T12:31:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25ccc37f844e65a9 with lease ID 0xcb3ee9028cc0d9a0: from storage DS-0fdc3f9e-ae59-40fb-9c6c-6d47d898064c node DatanodeRegistration(127.0.0.1:36151, datanodeUuid=ad7e54f4-e677-4555-844c-f46253668bca, infoPort=41727, infoSecurePort=0, ipcPort=40031, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T12:31:06,015 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25ccc37f844e65a9 with lease ID 0xcb3ee9028cc0d9a0: Processing first storage report for DS-e45f1cbb-2b3d-48b2-9224-66f6dfb96ccc from datanode DatanodeRegistration(127.0.0.1:36151, datanodeUuid=ad7e54f4-e677-4555-844c-f46253668bca, infoPort=41727, infoSecurePort=0, ipcPort=40031, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075) 2024-12-05T12:31:06,015 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25ccc37f844e65a9 with lease ID 0xcb3ee9028cc0d9a0: from storage DS-e45f1cbb-2b3d-48b2-9224-66f6dfb96ccc node DatanodeRegistration(127.0.0.1:36151, datanodeUuid=ad7e54f4-e677-4555-844c-f46253668bca, infoPort=41727, infoSecurePort=0, ipcPort=40031, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, 
invalidatedBlocks: 0 2024-12-05T12:31:06,015 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f65c942270a684d with lease ID 0xcb3ee9028cc0d99f: Processing first storage report for DS-bb309bd6-d8a8-4772-92c1-e968e1fcf59f from datanode DatanodeRegistration(127.0.0.1:33847, datanodeUuid=a86eef30-d633-403a-91d3-5d57c0f5ef07, infoPort=37957, infoSecurePort=0, ipcPort=44851, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075) 2024-12-05T12:31:06,016 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f65c942270a684d with lease ID 0xcb3ee9028cc0d99f: from storage DS-bb309bd6-d8a8-4772-92c1-e968e1fcf59f node DatanodeRegistration(127.0.0.1:33847, datanodeUuid=a86eef30-d633-403a-91d3-5d57c0f5ef07, infoPort=37957, infoSecurePort=0, ipcPort=44851, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T12:31:06,034 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data5/current/BP-743128014-172.17.0.2-1733401864075/current, will proceed with Du for space computation calculation, 2024-12-05T12:31:06,034 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data6/current/BP-743128014-172.17.0.2-1733401864075/current, will proceed with Du for space computation calculation, 2024-12-05T12:31:06,061 WARN [Thread-126 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T12:31:06,066 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3d93794147d9e065 with lease ID 0xcb3ee9028cc0d9a1: Processing first storage report for DS-9f57a884-33ae-4d52-9063-112bbd3c8122 from datanode DatanodeRegistration(127.0.0.1:37519, datanodeUuid=60eee8e3-a461-4da9-8a30-3048fa8df334, infoPort=41615, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075) 2024-12-05T12:31:06,066 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3d93794147d9e065 with lease ID 0xcb3ee9028cc0d9a1: from storage DS-9f57a884-33ae-4d52-9063-112bbd3c8122 node DatanodeRegistration(127.0.0.1:37519, datanodeUuid=60eee8e3-a461-4da9-8a30-3048fa8df334, infoPort=41615, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T12:31:06,066 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3d93794147d9e065 with lease ID 0xcb3ee9028cc0d9a1: Processing first storage report for DS-bfa12af8-82e5-475d-bad2-a0eec25e87c6 from datanode DatanodeRegistration(127.0.0.1:37519, datanodeUuid=60eee8e3-a461-4da9-8a30-3048fa8df334, infoPort=41615, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075) 2024-12-05T12:31:06,066 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3d93794147d9e065 with lease ID 0xcb3ee9028cc0d9a1: from storage DS-bfa12af8-82e5-475d-bad2-a0eec25e87c6 node DatanodeRegistration(127.0.0.1:37519, datanodeUuid=60eee8e3-a461-4da9-8a30-3048fa8df334, infoPort=41615, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=1681272035;c=1733401864075), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T12:31:06,329 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115 2024-12-05T12:31:06,420 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-05T12:31:06,476 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=365, ProcessCount=11, AvailableMemoryMB=3309 2024-12-05T12:31:06,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T12:31:06,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-05T12:31:06,587 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/zookeeper_0, clientPort=51200, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T12:31:06,597 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51200 2024-12-05T12:31:06,609 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:06,611 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:06,700 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:06,700 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:06,750 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:57522 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:33847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57522 dst: /127.0.0.1:33847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:06,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-05T12:31:07,171 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T12:31:07,181 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df with version=8 2024-12-05T12:31:07,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/hbase-staging 2024-12-05T12:31:07,274 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-05T12:31:07,520 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2556e7014d8b:0 server-side Connection retries=45 2024-12-05T12:31:07,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:07,532 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:07,538 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T12:31:07,539 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:07,539 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T12:31:07,687 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T12:31:07,747 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-05T12:31:07,756 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-05T12:31:07,760 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T12:31:07,787 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 11374 (auto-detected) 2024-12-05T12:31:07,788 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-05T12:31:07,807 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36431 2024-12-05T12:31:07,828 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36431 connecting to ZooKeeper ensemble=127.0.0.1:51200 2024-12-05T12:31:07,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:364310x0, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T12:31:07,864 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36431-0x100aa6b7be20000 connected 2024-12-05T12:31:07,897 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:07,900 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:07,910 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:31:07,914 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df, hbase.cluster.distributed=false 2024-12-05T12:31:07,937 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T12:31:07,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36431 2024-12-05T12:31:07,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36431 2024-12-05T12:31:07,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36431 2024-12-05T12:31:07,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36431 2024-12-05T12:31:07,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36431 2024-12-05T12:31:08,057 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2556e7014d8b:0 server-side Connection retries=45 2024-12-05T12:31:08,058 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:08,059 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:08,059 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T12:31:08,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:08,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T12:31:08,062 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T12:31:08,064 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T12:31:08,065 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38835 2024-12-05T12:31:08,067 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38835 connecting to ZooKeeper ensemble=127.0.0.1:51200 2024-12-05T12:31:08,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:08,072 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:08,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:388350x0, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T12:31:08,080 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38835-0x100aa6b7be20001 connected 2024-12-05T12:31:08,082 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:31:08,087 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T12:31:08,097 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T12:31:08,100 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T12:31:08,106 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T12:31:08,109 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38835 2024-12-05T12:31:08,111 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=38835 2024-12-05T12:31:08,111 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38835 2024-12-05T12:31:08,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38835 2024-12-05T12:31:08,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38835 2024-12-05T12:31:08,138 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2556e7014d8b:0 server-side Connection retries=45 2024-12-05T12:31:08,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:08,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:08,139 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T12:31:08,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:08,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T12:31:08,139 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T12:31:08,140 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T12:31:08,141 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42013 2024-12-05T12:31:08,142 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42013 connecting to ZooKeeper ensemble=127.0.0.1:51200 2024-12-05T12:31:08,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:08,147 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:08,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:420130x0, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T12:31:08,154 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:31:08,154 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42013-0x100aa6b7be20002 connected 2024-12-05T12:31:08,154 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-05T12:31:08,157 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T12:31:08,159 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T12:31:08,161 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T12:31:08,167 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42013 2024-12-05T12:31:08,168 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42013 2024-12-05T12:31:08,169 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42013 2024-12-05T12:31:08,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42013 2024-12-05T12:31:08,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42013 2024-12-05T12:31:08,197 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2556e7014d8b:0 server-side Connection retries=45 2024-12-05T12:31:08,197 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:08,197 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:08,198 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T12:31:08,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T12:31:08,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T12:31:08,198 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T12:31:08,198 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T12:31:08,199 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45411 2024-12-05T12:31:08,201 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45411 connecting to ZooKeeper ensemble=127.0.0.1:51200 2024-12-05T12:31:08,203 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:08,206 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:08,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454110x0, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T12:31:08,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45411-0x100aa6b7be20003 connected 2024-12-05T12:31:08,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:31:08,213 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T12:31:08,213 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T12:31:08,214 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T12:31:08,216 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T12:31:08,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45411 2024-12-05T12:31:08,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45411 2024-12-05T12:31:08,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45411 2024-12-05T12:31:08,218 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45411 2024-12-05T12:31:08,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45411 2024-12-05T12:31:08,235 DEBUG [M:0;2556e7014d8b:36431 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2556e7014d8b:36431 2024-12-05T12:31:08,235 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2556e7014d8b,36431,1733401867325 2024-12-05T12:31:08,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T12:31:08,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T12:31:08,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
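
Annotation: the region-server startup entries above show each server instantiating its RPC call queues (default.FPBQ.Fifo, priority.RWQ.Fifo with separate read and write handlers, replication.FPBQ.Fifo, metaPriority.FPBQ.Fifo) before binding the NettyRpcServer and registering ZooKeeper watchers. Below is a minimal sketch of the configuration keys that usually drive those handler and queue counts in a test setup; the values are illustrative rather than taken from this run, and the exact key names should be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcQueueConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Total RPC handlers per region server; the log above shows handlerCount=3
    // for the default queue in this mini-cluster run.
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Fraction of handlers reserved for priority (meta/system) calls.
    conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f);
    // Split the priority queue into read vs. write handlers, similar to the
    // "priority.RWQ.Fifo" writeHandlers=1 / readHandlers=2 breakdown logged above.
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
    // 0 means reads and scans share the same queues (scanQueues=0 in the log).
    conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0f);
    // Upper bound on queued calls per queue; the maxQueueLength=30 in the log
    // comes from the default of 10x the handler count, this key overrides it.
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
    return conf;
  }
}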
2024-12-05T12:31:08,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T12:31:08,245 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2556e7014d8b,36431,1733401867325 2024-12-05T12:31:08,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T12:31:08,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T12:31:08,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T12:31:08,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:08,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:08,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:08,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:08,277 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T12:31:08,278 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2556e7014d8b,36431,1733401867325 from backup master directory 2024-12-05T12:31:08,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2556e7014d8b,36431,1733401867325 2024-12-05T12:31:08,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T12:31:08,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-05T12:31:08,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T12:31:08,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T12:31:08,283 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T12:31:08,283 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2556e7014d8b,36431,1733401867325 2024-12-05T12:31:08,285 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-05T12:31:08,286 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-05T12:31:08,349 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/hbase.id] with ID: e4403138-9baa-409b-9db4-f65006c37426 2024-12-05T12:31:08,349 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/.tmp/hbase.id 2024-12-05T12:31:08,356 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:08,356 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:08,359 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:41204 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:33847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41204 dst: /127.0.0.1:33847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:08,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-05T12:31:08,365 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T12:31:08,365 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/.tmp/hbase.id]:[hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/hbase.id] 2024-12-05T12:31:08,409 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:31:08,414 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T12:31:08,433 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-12-05T12:31:08,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:08,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:08,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:08,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:08,448 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:08,448 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:08,451 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:41218 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:33847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41218 dst: /127.0.0.1:33847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:08,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-05T12:31:08,459 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
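
Annotation: the repeated DFSStripedOutputStream warnings and DataXceiver "Premature EOF" errors above come from writing under the RS-3-2-1024k erasure-coding policy on a mini-DFS with too few datanodes to place every parity block; the log message itself points at 'hdfs ec -verifyClusterSetup'. The following is a hedged sketch of checking the effective EC policy programmatically with the Hadoop 3.x DistributedFileSystem API; the path is a placeholder, only the NameNode address is taken from this run.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI as seen in this log; adjust for a real deployment.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43397"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/user/jenkins/test-data");
      // Effective policy for the directory (null means plain replication).
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      System.out.println("EC policy on " + dir + ": "
          + (policy == null ? "replication" : policy.getName()));
      // RS-3-2 needs at least 5 datanodes for a full block group; with fewer,
      // some parity blocks cannot be placed, which is what the warnings report.
      // Equivalent CLI check (quoted in the log): hdfs ec -verifyClusterSetup
    }
  }
}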
2024-12-05T12:31:08,474 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:31:08,476 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T12:31:08,482 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:31:08,509 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:08,509 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:08,512 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:41224 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:33847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41224 dst: /127.0.0.1:33847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:08,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-05T12:31:08,520 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T12:31:08,543 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store 2024-12-05T12:31:08,560 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:08,560 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:08,567 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:56592 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36151:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56592 dst: /127.0.0.1:36151 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:08,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-05T12:31:08,572 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T12:31:08,577 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-05T12:31:08,579 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:08,581 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T12:31:08,581 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:08,581 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:08,582 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-05T12:31:08,582 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:08,583 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:08,584 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733401868580Disabling compacts and flushes for region at 1733401868580Disabling writes for close at 1733401868582 (+2 ms)Writing region close event to WAL at 1733401868583 (+1 ms)Closed at 1733401868583 2024-12-05T12:31:08,586 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/.initializing 2024-12-05T12:31:08,586 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/WALs/2556e7014d8b,36431,1733401867325 2024-12-05T12:31:08,594 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T12:31:08,612 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C36431%2C1733401867325, suffix=, logDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/WALs/2556e7014d8b,36431,1733401867325, archiveDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/oldWALs, maxLogs=10 2024-12-05T12:31:08,643 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/WALs/2556e7014d8b,36431,1733401867325/2556e7014d8b%2C36431%2C1733401867325.1733401868618, exclude list is [], retry=0 2024-12-05T12:31:08,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:08,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36151,DS-0fdc3f9e-ae59-40fb-9c6c-6d47d898064c,DISK] 2024-12-05T12:31:08,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37519,DS-9f57a884-33ae-4d52-9063-112bbd3c8122,DISK] 2024-12-05T12:31:08,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33847,DS-8c31d81e-5946-42b1-80ec-7b705af31906,DISK] 2024-12-05T12:31:08,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-05T12:31:08,706 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/WALs/2556e7014d8b,36431,1733401867325/2556e7014d8b%2C36431%2C1733401867325.1733401868618 2024-12-05T12:31:08,707 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41615:41615),(127.0.0.1/127.0.0.1:37957:37957),(127.0.0.1/127.0.0.1:41727:41727)] 2024-12-05T12:31:08,708 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:31:08,708 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:08,711 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,712 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,751 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T12:31:08,783 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:08,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:08,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,790 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T12:31:08,790 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:08,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:31:08,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T12:31:08,794 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:08,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:31:08,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T12:31:08,797 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:08,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:31:08,799 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,802 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,803 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,809 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,810 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,813 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T12:31:08,817 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:08,823 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:31:08,824 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62507340, jitterRate=-0.06856805086135864}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T12:31:08,832 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733401868724Initializing all the Stores at 1733401868726 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401868726Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401868728 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401868729 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401868729Cleaning up temporary data from old regions at 1733401868810 (+81 ms)Region opened successfully at 1733401868832 (+22 ms) 2024-12-05T12:31:08,833 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T12:31:08,874 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@610e8192, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2556e7014d8b/172.17.0.2:0 2024-12-05T12:31:08,910 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T12:31:08,921 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T12:31:08,921 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T12:31:08,924 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T12:31:08,925 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-05T12:31:08,930 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-05T12:31:08,930 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T12:31:08,955 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T12:31:08,963 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T12:31:08,965 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T12:31:08,968 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T12:31:08,969 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T12:31:08,971 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T12:31:08,973 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T12:31:08,976 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T12:31:08,977 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T12:31:08,979 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T12:31:08,980 DEBUG [master/2556e7014d8b:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T12:31:08,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-05T12:31:08,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-05T12:31:09,001 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T12:31:09,002 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T12:31:09,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:09,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:09,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:09,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:09,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,010 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2556e7014d8b,36431,1733401867325, sessionid=0x100aa6b7be20000, setting cluster-up flag (Was=false) 2024-12-05T12:31:09,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, 
quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,028 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T12:31:09,030 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2556e7014d8b,36431,1733401867325 2024-12-05T12:31:09,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,041 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T12:31:09,042 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2556e7014d8b,36431,1733401867325 2024-12-05T12:31:09,049 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T12:31:09,124 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(746): ClusterId : e4403138-9baa-409b-9db4-f65006c37426 2024-12-05T12:31:09,124 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(746): ClusterId : e4403138-9baa-409b-9db4-f65006c37426 2024-12-05T12:31:09,126 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(746): ClusterId : e4403138-9baa-409b-9db4-f65006c37426 2024-12-05T12:31:09,128 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T12:31:09,128 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T12:31:09,128 DEBUG 
[RS:2;2556e7014d8b:45411 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T12:31:09,134 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T12:31:09,134 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T12:31:09,134 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T12:31:09,134 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T12:31:09,134 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T12:31:09,134 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T12:31:09,137 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T12:31:09,137 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T12:31:09,138 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T12:31:09,138 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T12:31:09,138 DEBUG [RS:2;2556e7014d8b:45411 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23dbbf55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2556e7014d8b/172.17.0.2:0 2024-12-05T12:31:09,138 DEBUG [RS:1;2556e7014d8b:42013 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68ddc1d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2556e7014d8b/172.17.0.2:0 2024-12-05T12:31:09,138 DEBUG [RS:0;2556e7014d8b:38835 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74baca69, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2556e7014d8b/172.17.0.2:0 2024-12-05T12:31:09,150 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T12:31:09,155 DEBUG [RS:0;2556e7014d8b:38835 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2556e7014d8b:38835 2024-12-05T12:31:09,159 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T12:31:09,159 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T12:31:09,159 DEBUG [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T12:31:09,159 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T12:31:09,161 DEBUG [RS:1;2556e7014d8b:42013 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2556e7014d8b:42013 2024-12-05T12:31:09,161 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T12:31:09,161 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T12:31:09,161 DEBUG [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T12:31:09,162 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(2659): reportForDuty to master=2556e7014d8b,36431,1733401867325 with port=38835, startcode=1733401868016 2024-12-05T12:31:09,162 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(2659): reportForDuty to master=2556e7014d8b,36431,1733401867325 with port=42013, startcode=1733401868137 2024-12-05T12:31:09,165 DEBUG [RS:2;2556e7014d8b:45411 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2556e7014d8b:45411 2024-12-05T12:31:09,165 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T12:31:09,166 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T12:31:09,166 DEBUG [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T12:31:09,167 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(2659): reportForDuty to master=2556e7014d8b,36431,1733401867325 with port=45411, startcode=1733401868196 2024-12-05T12:31:09,167 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2556e7014d8b,36431,1733401867325 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T12:31:09,174 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2556e7014d8b:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:31:09,174 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2556e7014d8b:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:31:09,174 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2556e7014d8b:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:31:09,174 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2556e7014d8b:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:31:09,175 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2556e7014d8b:0, corePoolSize=10, maxPoolSize=10 2024-12-05T12:31:09,175 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,175 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2556e7014d8b:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:31:09,175 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,177 DEBUG [RS:2;2556e7014d8b:45411 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T12:31:09,177 DEBUG [RS:0;2556e7014d8b:38835 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T12:31:09,177 DEBUG [RS:1;2556e7014d8b:42013 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T12:31:09,184 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T12:31:09,184 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T12:31:09,185 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733401899185 2024-12-05T12:31:09,187 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 
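[Editor's note] The MASTER_* executor lines above only record pool sizing (corePoolSize/maxPoolSize pairs). As a minimal illustration of what that sizing means, here is a plain java.util.concurrent sketch; it is not HBase's own executor.ExecutorService wrapper, the pool name in the comment is taken from the log, and everything else is hypothetical.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolSizingSketch {
      public static void main(String[] args) throws Exception {
        // Same shape as the MASTER_OPEN_REGION pool logged above: corePoolSize=5, maxPoolSize=5.
        // With an unbounded work queue the pool never grows beyond corePoolSize, so core == max
        // behaves as a fixed-size pool of 5 worker threads.
        ThreadPoolExecutor pool =
            new ThreadPoolExecutor(5, 5, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true); // let idle workers exit instead of lingering forever
        pool.submit(() -> System.out.println("hypothetical open-region task"));
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }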
2024-12-05T12:31:09,188 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T12:31:09,192 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T12:31:09,192 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:09,192 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T12:31:09,192 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T12:31:09,192 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T12:31:09,192 INFO [PEWorker-2 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T12:31:09,194 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
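[Editor's note] The FSTableDescriptors line above prints the full hbase:meta descriptor (info/ns/rep_barrier/table families, ROW_INDEX_V1 encoding, per-family block sizes). A minimal client-side sketch of reading the same descriptor follows, assuming an HBase 2.x+ client on the classpath and an hbase-site.xml pointing at this cluster; it is illustrative only and not part of the test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class DescribeMetaSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          System.out.println(meta); // roughly the rendering FSTableDescriptors logs above
          for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
            System.out.printf("family=%s blocksize=%d encoding=%s%n",
                cf.getNameAsString(), cf.getBlocksize(), cf.getDataBlockEncoding());
          }
        }
      }
    }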
2024-12-05T12:31:09,198 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T12:31:09,199 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T12:31:09,200 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T12:31:09,205 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T12:31:09,206 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T12:31:09,208 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.large.0-1733401869207,5,FailOnTimeoutGroup] 2024-12-05T12:31:09,209 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:09,209 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:09,211 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.small.0-1733401869208,5,FailOnTimeoutGroup] 2024-12-05T12:31:09,211 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,212 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T12:31:09,213 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,213 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:09,221 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37275, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T12:31:09,221 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52625, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T12:31:09,221 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53699, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T12:31:09,225 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:54444 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:37519:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54444 dst: /127.0.0.1:37519 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:09,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36431 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2556e7014d8b,45411,1733401868196 2024-12-05T12:31:09,229 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36431 {}] master.ServerManager(517): Registering regionserver=2556e7014d8b,45411,1733401868196 2024-12-05T12:31:09,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-05T12:31:09,240 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T12:31:09,241 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36431 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2556e7014d8b,38835,1733401868016 2024-12-05T12:31:09,241 INFO [PEWorker-2 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T12:31:09,242 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36431 {}] master.ServerManager(517): Registering regionserver=2556e7014d8b,38835,1733401868016 2024-12-05T12:31:09,242 INFO [PEWorker-2 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df 2024-12-05T12:31:09,248 DEBUG [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df 2024-12-05T12:31:09,248 DEBUG [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43397 2024-12-05T12:31:09,248 DEBUG [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T12:31:09,249 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36431 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2556e7014d8b,42013,1733401868137 2024-12-05T12:31:09,249 DEBUG [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df 2024-12-05T12:31:09,249 DEBUG [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43397 2024-12-05T12:31:09,250 DEBUG [RS:0;2556e7014d8b:38835 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T12:31:09,250 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36431 {}] master.ServerManager(517): Registering regionserver=2556e7014d8b,42013,1733401868137 2024-12-05T12:31:09,253 DEBUG [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df 2024-12-05T12:31:09,253 DEBUG [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43397 2024-12-05T12:31:09,254 DEBUG [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T12:31:09,256 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:09,256 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:09,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T12:31:09,258 DEBUG [RS:0;2556e7014d8b:38835 {}] zookeeper.ZKUtil(111): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2556e7014d8b,38835,1733401868016 2024-12-05T12:31:09,259 WARN [RS:0;2556e7014d8b:38835 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T12:31:09,259 DEBUG [RS:2;2556e7014d8b:45411 {}] zookeeper.ZKUtil(111): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2556e7014d8b,45411,1733401868196 2024-12-05T12:31:09,259 INFO [RS:0;2556e7014d8b:38835 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:31:09,259 WARN [RS:2;2556e7014d8b:45411 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T12:31:09,259 DEBUG [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,38835,1733401868016 2024-12-05T12:31:09,259 INFO [RS:2;2556e7014d8b:45411 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:31:09,259 DEBUG [RS:1;2556e7014d8b:42013 {}] zookeeper.ZKUtil(111): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2556e7014d8b,42013,1733401868137 2024-12-05T12:31:09,259 WARN [RS:1;2556e7014d8b:42013 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T12:31:09,259 DEBUG [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,45411,1733401868196 2024-12-05T12:31:09,259 INFO [RS:1;2556e7014d8b:42013 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:31:09,260 DEBUG [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,42013,1733401868137 2024-12-05T12:31:09,261 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2556e7014d8b,38835,1733401868016] 2024-12-05T12:31:09,261 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2556e7014d8b,42013,1733401868137] 2024-12-05T12:31:09,261 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2556e7014d8b,45411,1733401868196] 2024-12-05T12:31:09,271 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:54458 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:37519:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54458 dst: /127.0.0.1:37519 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:09,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-05T12:31:09,283 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
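[Editor's note] The DFSStripedOutputStream warnings and the DataXceiver "Premature EOF" errors above come from writing erasure-coded (RS-3-2-1024k) blocks on a mini-cluster with fewer datanodes than the 3 data + 2 parity cells require; the warning itself suggests running 'hdfs ec -verifyClusterSetup'. As a hedged sketch (Hadoop 3.x client assumed; the path is picked from the test-data directory in the log and is only an example), the same check can be done programmatically:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:43397"); // the test cluster's defaultFS from the log
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Example path; the test run writes under /user/jenkins/test-data/...
          ErasureCodingPolicy policy =
              dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
          if (policy == null) {
            System.out.println("path uses plain replication");
          } else {
            // RS-3-2-1024k needs numDataUnits + numParityUnits = 5 distinct datanodes
            System.out.printf("policy=%s needs %d datanodes%n",
                policy.getName(), policy.getNumDataUnits() + policy.getNumParityUnits());
          }
        }
      }
    }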
2024-12-05T12:31:09,284 DEBUG [PEWorker-2 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:09,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T12:31:09,291 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T12:31:09,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:09,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:09,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T12:31:09,295 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T12:31:09,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:09,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:09,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T12:31:09,297 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T12:31:09,297 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T12:31:09,297 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T12:31:09,300 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T12:31:09,300 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:09,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:09,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T12:31:09,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T12:31:09,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:09,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:09,305 DEBUG [PEWorker-2 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T12:31:09,306 DEBUG [PEWorker-2 
{}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740 2024-12-05T12:31:09,307 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740 2024-12-05T12:31:09,311 DEBUG [PEWorker-2 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T12:31:09,311 DEBUG [PEWorker-2 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T12:31:09,311 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T12:31:09,311 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T12:31:09,312 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T12:31:09,314 DEBUG [PEWorker-2 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T12:31:09,318 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T12:31:09,322 INFO [RS:2;2556e7014d8b:45411 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T12:31:09,322 INFO [RS:1;2556e7014d8b:42013 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T12:31:09,322 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,322 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,323 INFO [RS:0;2556e7014d8b:38835 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T12:31:09,324 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:09,325 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T12:31:09,325 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T12:31:09,325 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T12:31:09,331 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T12:31:09,332 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T12:31:09,332 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T12:31:09,333 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,333 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,333 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,333 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,333 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,333 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,333 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,333 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG 
[RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2556e7014d8b:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:31:09,334 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2556e7014d8b:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:31:09,334 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2556e7014d8b:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:31:09,334 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,334 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2556e7014d8b:0, 
corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:09,335 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,335 DEBUG [RS:1;2556e7014d8b:42013 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:09,335 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:09,336 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:09,336 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:09,336 DEBUG [RS:2;2556e7014d8b:45411 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:09,336 DEBUG [RS:0;2556e7014d8b:38835 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:09,344 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,344 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,344 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,344 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:09,345 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,42013,1733401868137-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,45411,1733401868196-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,345 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,38835,1733401868016-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-05T12:31:09,353 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:31:09,354 INFO [PEWorker-2 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67835832, jitterRate=0.010832667350769043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T12:31:09,356 DEBUG [PEWorker-2 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733401869284Initializing all the Stores at 1733401869286 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401869287 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401869288 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401869288Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401869289 (+1 ms)Cleaning up temporary data from old regions at 1733401869311 (+22 ms)Region opened successfully at 1733401869356 (+45 ms) 2024-12-05T12:31:09,357 DEBUG [PEWorker-2 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T12:31:09,357 INFO [PEWorker-2 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T12:31:09,357 DEBUG [PEWorker-2 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T12:31:09,357 DEBUG [PEWorker-2 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T12:31:09,357 DEBUG [PEWorker-2 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T12:31:09,358 INFO [PEWorker-2 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T12:31:09,358 DEBUG [PEWorker-2 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733401869356Disabling compacts and flushes for region at 1733401869356Disabling writes for close at 1733401869357 (+1 ms)Writing region close event to WAL at 1733401869358 (+1 ms)Closed at 1733401869358 
2024-12-05T12:31:09,362 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T12:31:09,362 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T12:31:09,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T12:31:09,375 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T12:31:09,375 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T12:31:09,375 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T12:31:09,377 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,38835,1733401868016-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,377 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,42013,1733401868137-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,377 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,45411,1733401868196-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,378 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,378 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,378 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,378 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.Replication(171): 2556e7014d8b,42013,1733401868137 started 2024-12-05T12:31:09,378 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.Replication(171): 2556e7014d8b,45411,1733401868196 started 2024-12-05T12:31:09,378 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.Replication(171): 2556e7014d8b,38835,1733401868016 started 2024-12-05T12:31:09,384 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T12:31:09,389 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T12:31:09,401 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:09,401 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(1482): Serving as 2556e7014d8b,42013,1733401868137, RpcServer on 2556e7014d8b/172.17.0.2:42013, sessionid=0x100aa6b7be20002 2024-12-05T12:31:09,402 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T12:31:09,402 DEBUG [RS:1;2556e7014d8b:42013 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2556e7014d8b,42013,1733401868137 2024-12-05T12:31:09,402 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,42013,1733401868137' 2024-12-05T12:31:09,403 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T12:31:09,403 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T12:31:09,404 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T12:31:09,404 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:09,404 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T12:31:09,404 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(1482): Serving as 2556e7014d8b,38835,1733401868016, RpcServer on 2556e7014d8b/172.17.0.2:38835, sessionid=0x100aa6b7be20001 2024-12-05T12:31:09,404 DEBUG [RS:1;2556e7014d8b:42013 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2556e7014d8b,42013,1733401868137 2024-12-05T12:31:09,404 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,42013,1733401868137' 2024-12-05T12:31:09,404 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T12:31:09,405 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T12:31:09,405 DEBUG [RS:0;2556e7014d8b:38835 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2556e7014d8b,38835,1733401868016 2024-12-05T12:31:09,405 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:09,405 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,38835,1733401868016' 2024-12-05T12:31:09,405 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T12:31:09,405 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(1482): Serving as 2556e7014d8b,45411,1733401868196, RpcServer on 2556e7014d8b/172.17.0.2:45411, sessionid=0x100aa6b7be20003 2024-12-05T12:31:09,405 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T12:31:09,405 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T12:31:09,405 DEBUG [RS:2;2556e7014d8b:45411 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2556e7014d8b,45411,1733401868196 2024-12-05T12:31:09,405 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,45411,1733401868196' 2024-12-05T12:31:09,405 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T12:31:09,405 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T12:31:09,406 DEBUG [RS:1;2556e7014d8b:42013 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T12:31:09,406 INFO [RS:1;2556e7014d8b:42013 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T12:31:09,406 INFO [RS:1;2556e7014d8b:42013 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-05T12:31:09,406 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T12:31:09,406 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T12:31:09,406 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T12:31:09,406 DEBUG [RS:0;2556e7014d8b:38835 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2556e7014d8b,38835,1733401868016 2024-12-05T12:31:09,406 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,38835,1733401868016' 2024-12-05T12:31:09,406 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T12:31:09,407 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T12:31:09,407 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T12:31:09,407 DEBUG [RS:2;2556e7014d8b:45411 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2556e7014d8b,45411,1733401868196 2024-12-05T12:31:09,407 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T12:31:09,407 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,45411,1733401868196' 2024-12-05T12:31:09,407 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T12:31:09,408 DEBUG [RS:0;2556e7014d8b:38835 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T12:31:09,408 INFO [RS:0;2556e7014d8b:38835 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T12:31:09,408 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T12:31:09,408 INFO [RS:0;2556e7014d8b:38835 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T12:31:09,409 DEBUG [RS:2;2556e7014d8b:45411 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T12:31:09,409 INFO [RS:2;2556e7014d8b:45411 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T12:31:09,409 INFO [RS:2;2556e7014d8b:45411 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-05T12:31:09,513 INFO [RS:2;2556e7014d8b:45411 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T12:31:09,513 INFO [RS:1;2556e7014d8b:42013 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T12:31:09,513 INFO [RS:0;2556e7014d8b:38835 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T12:31:09,517 INFO [RS:1;2556e7014d8b:42013 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C42013%2C1733401868137, suffix=, logDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,42013,1733401868137, archiveDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/oldWALs, maxLogs=32 2024-12-05T12:31:09,517 INFO [RS:0;2556e7014d8b:38835 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C38835%2C1733401868016, suffix=, logDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,38835,1733401868016, archiveDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/oldWALs, maxLogs=32 2024-12-05T12:31:09,517 INFO [RS:2;2556e7014d8b:45411 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C45411%2C1733401868196, suffix=, logDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,45411,1733401868196, archiveDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/oldWALs, maxLogs=32 2024-12-05T12:31:09,538 DEBUG [RS:2;2556e7014d8b:45411 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,45411,1733401868196/2556e7014d8b%2C45411%2C1733401868196.1733401869522, exclude list is [], retry=0 2024-12-05T12:31:09,540 WARN [2556e7014d8b:36431 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
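
The WAL configuration entries above report blocksize=256 MB, rollsize=128 MB, maxLogs=32. As a rough guide (an assumption about this test setup, not something stated in the log), those numbers line up with the stock sizing rules: the WAL block size defaults to twice the DFS block size, and the roll size is the block size times the roll multiplier. A minimal sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Minimal sketch (assumption): how WAL sizing like the values logged above is usually derived.
    // blocksize is normally 2x the DFS block size unless overridden, and the roll size is
    // blocksize * the roll multiplier; maxLogs caps the number of retained WAL files.
    public class WalSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long dfsBlockSize = 128L * 1024 * 1024;                  // 128 MB DFS default assumed here
        long walBlockSize = conf.getLong("hbase.regionserver.hlog.blocksize",
            dfsBlockSize * 2);                                   // -> 256 MB as in the log
        float rollMultiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        long rollSize = (long) (walBlockSize * rollMultiplier);  // -> 128 MB as in the log
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n", walBlockSize, rollSize, maxLogs);
      }
    }
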
2024-12-05T12:31:09,541 DEBUG [RS:1;2556e7014d8b:42013 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,42013,1733401868137/2556e7014d8b%2C42013%2C1733401868137.1733401869522, exclude list is [], retry=0 2024-12-05T12:31:09,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33847,DS-8c31d81e-5946-42b1-80ec-7b705af31906,DISK] 2024-12-05T12:31:09,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36151,DS-0fdc3f9e-ae59-40fb-9c6c-6d47d898064c,DISK] 2024-12-05T12:31:09,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33847,DS-8c31d81e-5946-42b1-80ec-7b705af31906,DISK] 2024-12-05T12:31:09,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37519,DS-9f57a884-33ae-4d52-9063-112bbd3c8122,DISK] 2024-12-05T12:31:09,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37519,DS-9f57a884-33ae-4d52-9063-112bbd3c8122,DISK] 2024-12-05T12:31:09,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36151,DS-0fdc3f9e-ae59-40fb-9c6c-6d47d898064c,DISK] 2024-12-05T12:31:09,576 DEBUG [RS:0;2556e7014d8b:38835 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,38835,1733401868016/2556e7014d8b%2C38835%2C1733401868016.1733401869522, exclude list is [], retry=0 2024-12-05T12:31:09,587 INFO [RS:2;2556e7014d8b:45411 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,45411,1733401868196/2556e7014d8b%2C45411%2C1733401868196.1733401869522 2024-12-05T12:31:09,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36151,DS-0fdc3f9e-ae59-40fb-9c6c-6d47d898064c,DISK] 2024-12-05T12:31:09,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33847,DS-8c31d81e-5946-42b1-80ec-7b705af31906,DISK] 2024-12-05T12:31:09,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37519,DS-9f57a884-33ae-4d52-9063-112bbd3c8122,DISK] 2024-12-05T12:31:09,604 INFO [RS:1;2556e7014d8b:42013 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,42013,1733401868137/2556e7014d8b%2C42013%2C1733401868137.1733401869522 2024-12-05T12:31:09,615 DEBUG [RS:2;2556e7014d8b:45411 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37957:37957),(127.0.0.1/127.0.0.1:41727:41727),(127.0.0.1/127.0.0.1:41615:41615)] 2024-12-05T12:31:09,620 DEBUG [RS:1;2556e7014d8b:42013 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41727:41727),(127.0.0.1/127.0.0.1:37957:37957),(127.0.0.1/127.0.0.1:41615:41615)] 2024-12-05T12:31:09,620 INFO [RS:0;2556e7014d8b:38835 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,38835,1733401868016/2556e7014d8b%2C38835%2C1733401868016.1733401869522 2024-12-05T12:31:09,636 DEBUG [RS:0;2556e7014d8b:38835 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37957:37957),(127.0.0.1/127.0.0.1:41727:41727),(127.0.0.1/127.0.0.1:41615:41615)] 2024-12-05T12:31:09,793 DEBUG [2556e7014d8b:36431 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T12:31:09,801 DEBUG [2556e7014d8b:36431 {}] balancer.BalancerClusterState(204): Hosts are {2556e7014d8b=0} racks are {/default-rack=0} 2024-12-05T12:31:09,808 DEBUG [2556e7014d8b:36431 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:31:09,808 DEBUG [2556e7014d8b:36431 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:31:09,808 DEBUG [2556e7014d8b:36431 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:31:09,808 DEBUG [2556e7014d8b:36431 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:31:09,808 DEBUG [2556e7014d8b:36431 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:31:09,808 DEBUG [2556e7014d8b:36431 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:31:09,808 INFO [2556e7014d8b:36431 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:31:09,808 INFO [2556e7014d8b:36431 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:31:09,808 INFO [2556e7014d8b:36431 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:31:09,808 DEBUG [2556e7014d8b:36431 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:31:09,818 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2556e7014d8b,45411,1733401868196 2024-12-05T12:31:09,825 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2556e7014d8b,45411,1733401868196, state=OPENING 2024-12-05T12:31:09,831 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T12:31:09,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:09,834 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:09,834 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:09,834 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:09,834 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:09,836 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T12:31:09,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2556e7014d8b,45411,1733401868196}] 2024-12-05T12:31:10,013 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T12:31:10,015 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37651, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:31:10,026 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T12:31:10,027 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:31:10,028 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T12:31:10,031 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C45411%2C1733401868196.meta, suffix=.meta, logDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,45411,1733401868196, archiveDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/oldWALs, maxLogs=32 2024-12-05T12:31:10,048 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 
{event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,45411,1733401868196/2556e7014d8b%2C45411%2C1733401868196.meta.1733401870033.meta, exclude list is [], retry=0 2024-12-05T12:31:10,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37519,DS-9f57a884-33ae-4d52-9063-112bbd3c8122,DISK] 2024-12-05T12:31:10,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33847,DS-8c31d81e-5946-42b1-80ec-7b705af31906,DISK] 2024-12-05T12:31:10,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36151,DS-0fdc3f9e-ae59-40fb-9c6c-6d47d898064c,DISK] 2024-12-05T12:31:10,058 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/WALs/2556e7014d8b,45411,1733401868196/2556e7014d8b%2C45411%2C1733401868196.meta.1733401870033.meta 2024-12-05T12:31:10,058 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41615:41615),(127.0.0.1/127.0.0.1:37957:37957),(127.0.0.1/127.0.0.1:41727:41727)] 2024-12-05T12:31:10,058 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:31:10,060 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T12:31:10,063 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T12:31:10,067 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-05T12:31:10,072 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T12:31:10,073 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:10,073 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T12:31:10,073 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T12:31:10,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T12:31:10,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T12:31:10,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:10,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:10,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T12:31:10,081 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T12:31:10,081 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:10,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:10,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T12:31:10,083 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T12:31:10,083 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:10,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:10,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T12:31:10,085 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T12:31:10,085 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:10,086 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-05T12:31:10,086 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T12:31:10,087 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740 2024-12-05T12:31:10,116 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740 2024-12-05T12:31:10,121 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T12:31:10,121 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T12:31:10,122 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T12:31:10,126 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T12:31:10,128 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64645249, jitterRate=-0.036710724234580994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T12:31:10,128 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T12:31:10,130 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733401870073Writing region info on filesystem at 1733401870074 (+1 ms)Initializing all the Stores at 1733401870076 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401870076Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401870076Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401870076Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401870076Cleaning up temporary data from old regions at 1733401870121 (+45 ms)Running coprocessor post-open hooks at 1733401870128 (+7 ms)Region opened successfully at 1733401870129 (+1 ms) 2024-12-05T12:31:10,137 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733401870004 2024-12-05T12:31:10,156 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T12:31:10,156 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T12:31:10,160 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2556e7014d8b,45411,1733401868196 2024-12-05T12:31:10,162 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2556e7014d8b,45411,1733401868196, state=OPEN 2024-12-05T12:31:10,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:31:10,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:31:10,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:10,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:31:10,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:10,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:31:10,166 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:10,166 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:10,167 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2556e7014d8b,45411,1733401868196 2024-12-05T12:31:10,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T12:31:10,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2556e7014d8b,45411,1733401868196 in 329 msec 2024-12-05T12:31:10,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T12:31:10,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 806 msec 2024-12-05T12:31:10,181 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T12:31:10,181 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T12:31:10,204 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:31:10,205 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2556e7014d8b,45411,1733401868196, seqNum=-1] 2024-12-05T12:31:10,229 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:31:10,231 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50127, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:31:10,252 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1720 sec 2024-12-05T12:31:10,252 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733401870252, completionTime=-1 2024-12-05T12:31:10,254 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T12:31:10,254 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-05T12:31:10,282 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T12:31:10,283 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733401930283 2024-12-05T12:31:10,283 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733401990283 2024-12-05T12:31:10,283 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-12-05T12:31:10,284 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:31:10,291 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,36431,1733401867325-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:10,291 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,36431,1733401867325-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:10,291 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,36431,1733401867325-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:10,293 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2556e7014d8b:36431, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:10,293 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:10,294 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:10,301 DEBUG [master/2556e7014d8b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T12:31:10,323 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.040sec 2024-12-05T12:31:10,325 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T12:31:10,326 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T12:31:10,327 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T12:31:10,328 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-05T12:31:10,328 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T12:31:10,329 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,36431,1733401867325-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T12:31:10,330 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,36431,1733401867325-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T12:31:10,334 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T12:31:10,335 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T12:31:10,335 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,36431,1733401867325-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:10,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cd2ba82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:31:10,438 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-05T12:31:10,438 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-05T12:31:10,442 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2556e7014d8b,36431,-1 for getting cluster id 2024-12-05T12:31:10,445 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:31:10,452 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e4403138-9baa-409b-9db4-f65006c37426' 2024-12-05T12:31:10,454 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:31:10,455 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e4403138-9baa-409b-9db4-f65006c37426" 2024-12-05T12:31:10,455 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2837e7a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:31:10,455 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2556e7014d8b,36431,-1] 2024-12-05T12:31:10,457 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:31:10,459 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:10,460 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-05T12:31:10,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d5c4d62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:31:10,464 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:31:10,471 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2556e7014d8b,45411,1733401868196, seqNum=-1] 2024-12-05T12:31:10,472 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:31:10,474 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35744, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:31:10,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2556e7014d8b,36431,1733401867325 2024-12-05T12:31:10,505 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T12:31:10,510 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2556e7014d8b,36431,1733401867325 2024-12-05T12:31:10,513 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2c34363b 2024-12-05T12:31:10,515 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T12:31:10,517 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57390, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T12:31:10,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:31:10,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-05T12:31:10,532 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:31:10,534 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-05T12:31:10,534 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:10,537 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:31:10,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:31:10,546 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:10,546 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:10,549 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:54530 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:37519:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54530 dst: /127.0.0.1:37519 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:10,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-05T12:31:10,556 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
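
The "Cannot allocate parity block" warnings above are expected on this topology: the RS-3-2-1024k policy writes 3 data + 2 parity blocks and so needs at least 5 datanodes, while the mini cluster only has 3 (the three DatanodeInfoWithStorage entries earlier in the log). The sketch below uses the public HDFS client API to surface the same mismatch that 'hdfs ec -verifyClusterSetup' would report; the NameNode address is taken from the WAL paths in the log, and the inspected path is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    // Minimal sketch (assumptions: the checked path is a placeholder, the NameNode address
    // comes from the log). RS-3-2-1024k needs numDataUnits + numParityUnits = 5 datanodes;
    // a 3-datanode mini cluster can place the data blocks but not both parity blocks,
    // which is what the DFSStripedOutputStream warnings above are reporting.
    public class EcCapacityCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:43397");
        try (DistributedFileSystem dfs =
                (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
          Path dir = new Path("/user/jenkins/test-data");        // hypothetical path to inspect
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          int live = dfs.getDataNodeStats().length;
          if (policy != null) {
            int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
            System.out.printf("policy=%s needs %d datanodes, cluster has %d%n",
                policy.getName(), needed, live);
          } else {
            System.out.println("no EC policy on " + dir + "; cluster has " + live + " datanodes");
          }
        }
      }
    }
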
2024-12-05T12:31:10,559 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d41aea73c3bff27c7520497c6b69c6d0, NAME => 'TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df 2024-12-05T12:31:10,566 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:10,566 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:10,573 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:56666 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:36151:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56666 dst: /127.0.0.1:36151 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:10,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-05T12:31:10,581 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T12:31:10,582 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:10,582 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing d41aea73c3bff27c7520497c6b69c6d0, disabling compactions & flushes 2024-12-05T12:31:10,582 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:10,582 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:10,582 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. after waiting 0 ms 2024-12-05T12:31:10,582 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:10,582 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:10,582 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for d41aea73c3bff27c7520497c6b69c6d0: Waiting for close lock at 1733401870582Disabling compacts and flushes for region at 1733401870582Disabling writes for close at 1733401870582Writing region close event to WAL at 1733401870582Closed at 1733401870582 2024-12-05T12:31:10,584 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:31:10,589 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733401870584"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733401870584"}]},"ts":"1733401870584"} 2024-12-05T12:31:10,594 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-05T12:31:10,595 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:31:10,598 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733401870595"}]},"ts":"1733401870595"} 2024-12-05T12:31:10,602 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-05T12:31:10,603 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2556e7014d8b=0} racks are {/default-rack=0} 2024-12-05T12:31:10,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:31:10,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:31:10,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:31:10,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:31:10,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:31:10,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:31:10,604 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:31:10,604 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:31:10,604 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:31:10,604 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:31:10,606 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d41aea73c3bff27c7520497c6b69c6d0, ASSIGN}] 2024-12-05T12:31:10,609 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d41aea73c3bff27c7520497c6b69c6d0, ASSIGN 2024-12-05T12:31:10,611 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d41aea73c3bff27c7520497c6b69c6d0, ASSIGN; state=OFFLINE, location=2556e7014d8b,45411,1733401868196; forceNewPlan=false, retain=false 2024-12-05T12:31:10,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:31:10,763 INFO [2556e7014d8b:36431 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-05T12:31:10,764 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d41aea73c3bff27c7520497c6b69c6d0, regionState=OPENING, regionLocation=2556e7014d8b,45411,1733401868196 2024-12-05T12:31:10,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d41aea73c3bff27c7520497c6b69c6d0, ASSIGN because future has completed 2024-12-05T12:31:10,769 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d41aea73c3bff27c7520497c6b69c6d0, server=2556e7014d8b,45411,1733401868196}] 2024-12-05T12:31:10,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:31:10,929 INFO [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:10,929 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d41aea73c3bff27c7520497c6b69c6d0, NAME => 'TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0.', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:31:10,930 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,930 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:10,930 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,930 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,932 INFO [StoreOpener-d41aea73c3bff27c7520497c6b69c6d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,934 INFO [StoreOpener-d41aea73c3bff27c7520497c6b69c6d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d41aea73c3bff27c7520497c6b69c6d0 columnFamilyName cf 2024-12-05T12:31:10,934 DEBUG [StoreOpener-d41aea73c3bff27c7520497c6b69c6d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:10,935 INFO [StoreOpener-d41aea73c3bff27c7520497c6b69c6d0-1 {}] regionserver.HStore(327): Store=d41aea73c3bff27c7520497c6b69c6d0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:31:10,935 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,936 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/default/TestHBaseWalOnEC/d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,937 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/default/TestHBaseWalOnEC/d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,938 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,938 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,940 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,945 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/default/TestHBaseWalOnEC/d41aea73c3bff27c7520497c6b69c6d0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:31:10,946 INFO [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d41aea73c3bff27c7520497c6b69c6d0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69145063, jitterRate=0.030341729521751404}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:31:10,946 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:10,947 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d41aea73c3bff27c7520497c6b69c6d0: Running coprocessor pre-open hook at 1733401870930Writing region info on filesystem at 1733401870930Initializing all the Stores at 1733401870932 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401870932Cleaning up temporary data from old regions at 1733401870938 (+6 ms)Running coprocessor post-open hooks at 1733401870946 (+8 ms)Region opened successfully at 1733401870947 (+1 ms) 2024-12-05T12:31:10,949 INFO [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0., pid=6, masterSystemTime=1733401870923 2024-12-05T12:31:10,952 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:10,952 INFO [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:10,953 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d41aea73c3bff27c7520497c6b69c6d0, regionState=OPEN, openSeqNum=2, regionLocation=2556e7014d8b,45411,1733401868196 2024-12-05T12:31:10,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d41aea73c3bff27c7520497c6b69c6d0, server=2556e7014d8b,45411,1733401868196 because future has completed 2024-12-05T12:31:10,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T12:31:10,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d41aea73c3bff27c7520497c6b69c6d0, server=2556e7014d8b,45411,1733401868196 in 189 msec 2024-12-05T12:31:10,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T12:31:10,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d41aea73c3bff27c7520497c6b69c6d0, ASSIGN in 356 msec 2024-12-05T12:31:10,969 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:31:10,969 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733401870969"}]},"ts":"1733401870969"} 2024-12-05T12:31:10,972 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-05T12:31:10,974 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:31:10,978 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 449 msec 
2024-12-05T12:31:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:31:11,169 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T12:31:11,169 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-05T12:31:11,171 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:31:11,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-05T12:31:11,177 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:31:11,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-05T12:31:11,186 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0., hostname=2556e7014d8b,45411,1733401868196, seqNum=2] 2024-12-05T12:31:11,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-05T12:31:11,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-05T12:31:11,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:31:11,203 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-05T12:31:11,205 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T12:31:11,206 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T12:31:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:31:11,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45411 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-05T12:31:11,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 
2024-12-05T12:31:11,372 INFO [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing d41aea73c3bff27c7520497c6b69c6d0 1/1 column families, dataSize=32 B heapSize=360 B
2024-12-05T12:31:11,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/default/TestHBaseWalOnEC/d41aea73c3bff27c7520497c6b69c6d0/.tmp/cf/c793936ccdbb4a1a92e8975664453d3e is 36, key is row/cf:cq/1733401871188/Put/seqid=0
2024-12-05T12:31:11,433 WARN [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-05T12:31:11,433 WARN [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-05T12:31:11,437 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006402303_22 at /127.0.0.1:56674 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36151:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56674 dst: /127.0.0.1:36151
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T12:31:11,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775648_1025 (size=4787)
2024-12-05T12:31:11,442 WARN [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-05T12:31:11,442 INFO [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/default/TestHBaseWalOnEC/d41aea73c3bff27c7520497c6b69c6d0/.tmp/cf/c793936ccdbb4a1a92e8975664453d3e 2024-12-05T12:31:11,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/default/TestHBaseWalOnEC/d41aea73c3bff27c7520497c6b69c6d0/.tmp/cf/c793936ccdbb4a1a92e8975664453d3e as hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/default/TestHBaseWalOnEC/d41aea73c3bff27c7520497c6b69c6d0/cf/c793936ccdbb4a1a92e8975664453d3e 2024-12-05T12:31:11,496 INFO [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/default/TestHBaseWalOnEC/d41aea73c3bff27c7520497c6b69c6d0/cf/c793936ccdbb4a1a92e8975664453d3e, entries=1, sequenceid=5, filesize=4.7 K 2024-12-05T12:31:11,503 INFO [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for d41aea73c3bff27c7520497c6b69c6d0 in 130ms, sequenceid=5, compaction requested=false 2024-12-05T12:31:11,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-05T12:31:11,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for d41aea73c3bff27c7520497c6b69c6d0: 2024-12-05T12:31:11,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 
2024-12-05T12:31:11,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-12-05T12:31:11,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-12-05T12:31:11,514 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-12-05T12:31:11,514 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 305 msec
2024-12-05T12:31:11,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-05T12:31:11,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 318 msec
2024-12-05T12:31:11,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-05T12:31:11,828 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed
2024-12-05T12:31:11,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-05T12:31:11,843 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-05T12:31:11,843 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    at org.junit.runners.Suite.runChild(Suite.java:128)
    at org.junit.runners.Suite.runChild(Suite.java:27)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-05T12:31:11,849 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:11,850 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:11,850 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-05T12:31:11,850 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T12:31:11,850 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=532819799, stopped=false 2024-12-05T12:31:11,850 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2556e7014d8b,36431,1733401867325 2024-12-05T12:31:11,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:11,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:11,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:11,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:11,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:11,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:11,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:11,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:11,853 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T12:31:11,853 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T12:31:11,853 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    at org.junit.runners.Suite.runChild(Suite.java:128)
    at org.junit.runners.Suite.runChild(Suite.java:27)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-05T12:31:11,853 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:11,854 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:31:11,854 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:31:11,854 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:31:11,854 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2556e7014d8b,38835,1733401868016' *****
2024-12-05T12:31:11,854 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-05T12:31:11,854 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2556e7014d8b,42013,1733401868137' *****
2024-12-05T12:31:11,854 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-05T12:31:11,854 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:31:11,854 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2556e7014d8b,45411,1733401868196' *****
2024-12-05T12:31:11,854 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-05T12:31:11,855 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-05T12:31:11,855 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-05T12:31:11,855 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-05T12:31:11,855 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-05T12:31:11,855 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-05T12:31:11,855 INFO [RS:0;2556e7014d8b:38835 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-05T12:31:11,855 INFO [RS:2;2556e7014d8b:45411 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-05T12:31:11,855 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-05T12:31:11,855 INFO [RS:1;2556e7014d8b:42013 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-05T12:31:11,855 INFO [RS:2;2556e7014d8b:45411 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-05T12:31:11,855 INFO [RS:0;2556e7014d8b:38835 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-05T12:31:11,855 INFO [RS:1;2556e7014d8b:42013 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-05T12:31:11,855 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(959): stopping server 2556e7014d8b,42013,1733401868137
2024-12-05T12:31:11,855 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(959): stopping server 2556e7014d8b,38835,1733401868016
2024-12-05T12:31:11,856 INFO [RS:1;2556e7014d8b:42013 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-05T12:31:11,856 INFO [RS:0;2556e7014d8b:38835 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-05T12:31:11,856 INFO [RS:1;2556e7014d8b:42013 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2556e7014d8b:42013.
2024-12-05T12:31:11,856 INFO [RS:0;2556e7014d8b:38835 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2556e7014d8b:38835.
2024-12-05T12:31:11,856 DEBUG [RS:1;2556e7014d8b:42013 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-05T12:31:11,856 DEBUG [RS:0;2556e7014d8b:38835 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-05T12:31:11,856 DEBUG [RS:1;2556e7014d8b:42013 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:11,856 DEBUG [RS:0;2556e7014d8b:38835 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:11,856 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(3091): Received CLOSE for d41aea73c3bff27c7520497c6b69c6d0
2024-12-05T12:31:11,856 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(976): stopping server 2556e7014d8b,38835,1733401868016; all regions closed.
2024-12-05T12:31:11,856 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(976): stopping server 2556e7014d8b,42013,1733401868137; all regions closed.
2024-12-05T12:31:11,857 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(959): stopping server 2556e7014d8b,45411,1733401868196
2024-12-05T12:31:11,857 INFO [RS:2;2556e7014d8b:45411 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-05T12:31:11,857 INFO [RS:2;2556e7014d8b:45411 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2556e7014d8b:45411.
2024-12-05T12:31:11,857 DEBUG [RS:2;2556e7014d8b:45411 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-05T12:31:11,857 DEBUG [RS:2;2556e7014d8b:45411 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:11,857 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-05T12:31:11,857 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-05T12:31:11,857 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-05T12:31:11,857 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T12:31:11,857 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d41aea73c3bff27c7520497c6b69c6d0, disabling compactions & flushes 2024-12-05T12:31:11,858 INFO [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:11,858 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:11,858 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T12:31:11,858 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. after waiting 0 ms 2024-12-05T12:31:11,858 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:11,858 DEBUG [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(1325): Online Regions={d41aea73c3bff27c7520497c6b69c6d0=TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0., 1588230740=hbase:meta,,1.1588230740} 2024-12-05T12:31:11,858 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T12:31:11,858 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T12:31:11,858 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T12:31:11,858 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T12:31:11,858 DEBUG [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d41aea73c3bff27c7520497c6b69c6d0 2024-12-05T12:31:11,858 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T12:31:11,858 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-05T12:31:11,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_1073741827_1017 (size=93) 2024-12-05T12:31:11,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741827_1017 (size=93) 2024-12-05T12:31:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_1073741828_1018 (size=93) 2024-12-05T12:31:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33847 is added to blk_1073741828_1018 (size=93) 2024-12-05T12:31:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741827_1017 (size=93) 2024-12-05T12:31:11,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741828_1018 (size=93) 2024-12-05T12:31:11,875 DEBUG [RS:1;2556e7014d8b:42013 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/oldWALs 2024-12-05T12:31:11,875 INFO [RS:1;2556e7014d8b:42013 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2556e7014d8b%2C42013%2C1733401868137:(num 1733401869522) 2024-12-05T12:31:11,875 DEBUG [RS:1;2556e7014d8b:42013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:11,875 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:31:11,876 DEBUG [RS:0;2556e7014d8b:38835 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/oldWALs 2024-12-05T12:31:11,876 INFO [RS:1;2556e7014d8b:42013 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T12:31:11,876 INFO [RS:0;2556e7014d8b:38835 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2556e7014d8b%2C38835%2C1733401868016:(num 1733401869522) 2024-12-05T12:31:11,876 DEBUG [RS:0;2556e7014d8b:38835 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:11,876 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:31:11,876 INFO [RS:0;2556e7014d8b:38835 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T12:31:11,876 INFO [RS:1;2556e7014d8b:42013 {}] hbase.ChoreService(370): Chore service for: regionserver/2556e7014d8b:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-05T12:31:11,877 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T12:31:11,877 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T12:31:11,877 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T12:31:11,877 INFO [RS:1;2556e7014d8b:42013 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T12:31:11,877 INFO [RS:1;2556e7014d8b:42013 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42013 2024-12-05T12:31:11,879 INFO [regionserver/2556e7014d8b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T12:31:11,879 INFO [RS:0;2556e7014d8b:38835 {}] hbase.ChoreService(370): Chore service for: regionserver/2556e7014d8b:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T12:31:11,879 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-05T12:31:11,880 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T12:31:11,880 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T12:31:11,880 INFO [RS:0;2556e7014d8b:38835 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T12:31:11,880 INFO [RS:0;2556e7014d8b:38835 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38835 2024-12-05T12:31:11,880 INFO [regionserver/2556e7014d8b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T12:31:11,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2556e7014d8b,42013,1733401868137 2024-12-05T12:31:11,881 INFO [RS:1;2556e7014d8b:42013 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T12:31:11,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T12:31:11,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2556e7014d8b,38835,1733401868016 2024-12-05T12:31:11,883 INFO [RS:0;2556e7014d8b:38835 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T12:31:11,884 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2556e7014d8b,38835,1733401868016] 2024-12-05T12:31:11,886 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2556e7014d8b,38835,1733401868016 already deleted, retry=false 2024-12-05T12:31:11,886 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2556e7014d8b,38835,1733401868016 expired; onlineServers=2 2024-12-05T12:31:11,886 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2556e7014d8b,42013,1733401868137] 2024-12-05T12:31:11,888 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2556e7014d8b,42013,1733401868137 already deleted, retry=false 2024-12-05T12:31:11,888 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2556e7014d8b,42013,1733401868137 expired; onlineServers=1 2024-12-05T12:31:11,890 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/default/TestHBaseWalOnEC/d41aea73c3bff27c7520497c6b69c6d0/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T12:31:11,893 INFO [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 
2024-12-05T12:31:11,893 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d41aea73c3bff27c7520497c6b69c6d0: Waiting for close lock at 1733401871857Running coprocessor pre-close hooks at 1733401871857Disabling compacts and flushes for region at 1733401871857Disabling writes for close at 1733401871858 (+1 ms)Writing region close event to WAL at 1733401871859 (+1 ms)Running coprocessor post-close hooks at 1733401871891 (+32 ms)Closed at 1733401871893 (+2 ms) 2024-12-05T12:31:11,893 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0. 2024-12-05T12:31:11,898 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/.tmp/info/7acbbb6b16e248099be2ac22abe847f5 is 153, key is TestHBaseWalOnEC,,1733401870518.d41aea73c3bff27c7520497c6b69c6d0./info:regioninfo/1733401870953/Put/seqid=0 2024-12-05T12:31:11,901 WARN [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:11,901 WARN [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:11,905 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006402303_22 at /127.0.0.1:41290 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41290 dst: /127.0.0.1:33847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:11,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-05T12:31:11,909 WARN [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T12:31:11,910 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/.tmp/info/7acbbb6b16e248099be2ac22abe847f5 2024-12-05T12:31:11,940 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/.tmp/ns/8bf0bb156ca34acdaf59efc2ba84695c is 43, key is default/ns:d/1733401870235/Put/seqid=0 2024-12-05T12:31:11,943 WARN [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:11,943 WARN [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:11,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006402303_22 at /127.0.0.1:56690 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:36151:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56690 dst: /127.0.0.1:36151 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:11,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-05T12:31:11,951 INFO [regionserver/2556e7014d8b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:31:11,951 WARN [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T12:31:11,951 INFO [regionserver/2556e7014d8b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:31:11,951 INFO [regionserver/2556e7014d8b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:31:11,951 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/.tmp/ns/8bf0bb156ca34acdaf59efc2ba84695c 2024-12-05T12:31:11,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-05T12:31:11,982 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/.tmp/table/225209a98be74cb2824dcc75fd75f54f is 52, key is TestHBaseWalOnEC/table:state/1733401870969/Put/seqid=0 2024-12-05T12:31:11,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-05T12:31:11,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-05T12:31:11,984 WARN [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:11,984 WARN [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
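The repeated hdfs.DFSStripedOutputStream warnings above come from writing under the RS-3-2-1024k erasure coding policy, which needs 3 data + 2 parity = 5 datanodes, while this mini cluster runs only 3 (numDataNodes=3 in the start options logged further down, with datanodes 127.0.0.1:33847, 127.0.0.1:36151 and 127.0.0.1:37519). The two parity blocks (index 3 and 4) therefore can never be placed, each striped write also logs "Block group <1> failed to write 2 blocks", and the interleaved DataXceiver "Premature EOF from inputStream" errors appear to be a side effect of the striped writer tearing down its write pipelines on this undersized cluster rather than an independent failure; the flushes themselves still complete. The warning points at 'hdfs ec -verifyClusterSetup'; as a rough sketch only (the class name, the /hbase path and the reuse of the test NameNode URI are illustrative, not part of TestHBaseWalOnEC), the same capacity check could be done programmatically with the public HDFS client API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class EcCapacityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // picks up core-site.xml / hdfs-site.xml
    // NameNode URI taken from the log above; replace with your own cluster's address.
    try (DistributedFileSystem dfs =
             (DistributedFileSystem) new Path("hdfs://localhost:43397/").getFileSystem(conf)) {
      Path dir = new Path("/hbase"); // illustrative directory to inspect
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      int liveDataNodes = dfs.getDataNodeStats(DatanodeReportType.LIVE).length;
      if (policy == null) {
        System.out.println("No EC policy on " + dir + "; plain replication is used.");
        return;
      }
      // e.g. 3 data + 2 parity for RS-3-2-1024k
      int required = policy.getNumDataUnits() + policy.getNumParityUnits();
      System.out.printf("policy=%s requires %d datanodes, live=%d%n",
          policy.getName(), required, liveDataNodes);
      if (liveDataNodes < required) {
        System.out.println("Striped writes cannot place all parity blocks, "
            + "matching the DFSStripedOutputStream warnings in this log.");
      }
    }
  }
}

Run against this 3-datanode mini cluster, such a check would report the same shortfall that 'hdfs ec -verifyClusterSetup' flags for RS-3-2-1024k.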
2024-12-05T12:31:11,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:31:11,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42013-0x100aa6b7be20002, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:31:11,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:31:11,985 INFO [RS:0;2556e7014d8b:38835 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T12:31:11,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38835-0x100aa6b7be20001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:31:11,986 INFO [RS:0;2556e7014d8b:38835 {}] regionserver.HRegionServer(1031): Exiting; stopping=2556e7014d8b,38835,1733401868016; zookeeper connection closed. 2024-12-05T12:31:11,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-05T12:31:11,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-05T12:31:11,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-05T12:31:11,988 INFO [RS:1;2556e7014d8b:42013 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T12:31:11,989 INFO [RS:1;2556e7014d8b:42013 {}] regionserver.HRegionServer(1031): Exiting; stopping=2556e7014d8b,42013,1733401868137; zookeeper connection closed. 2024-12-05T12:31:11,990 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@e61f9af {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@e61f9af 2024-12-05T12:31:11,990 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4f7be718 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4f7be718 2024-12-05T12:31:11,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-05T12:31:11,992 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006402303_22 at /127.0.0.1:56742 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:36151:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56742 dst: /127.0.0.1:36151 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:11,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-05T12:31:12,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-05T12:31:12,002 WARN [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T12:31:12,003 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/.tmp/table/225209a98be74cb2824dcc75fd75f54f 2024-12-05T12:31:12,013 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/.tmp/info/7acbbb6b16e248099be2ac22abe847f5 as hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/info/7acbbb6b16e248099be2ac22abe847f5 2024-12-05T12:31:12,022 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/info/7acbbb6b16e248099be2ac22abe847f5, entries=10, sequenceid=11, filesize=6.5 K 2024-12-05T12:31:12,024 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/.tmp/ns/8bf0bb156ca34acdaf59efc2ba84695c as hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/ns/8bf0bb156ca34acdaf59efc2ba84695c 2024-12-05T12:31:12,033 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/ns/8bf0bb156ca34acdaf59efc2ba84695c, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T12:31:12,034 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/.tmp/table/225209a98be74cb2824dcc75fd75f54f as hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/table/225209a98be74cb2824dcc75fd75f54f 2024-12-05T12:31:12,044 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/table/225209a98be74cb2824dcc75fd75f54f, entries=2, sequenceid=11, filesize=5.1 K 2024-12-05T12:31:12,046 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 188ms, sequenceid=11, compaction requested=false 2024-12-05T12:31:12,046 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T12:31:12,055 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-05T12:31:12,056 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T12:31:12,056 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T12:31:12,056 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733401871858Running coprocessor pre-close hooks at 1733401871858Disabling compacts and flushes for region at 1733401871858Disabling writes for close at 1733401871858Obtaining lock to block concurrent updates at 1733401871859 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733401871859Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733401871860 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733401871863 (+3 ms)Flushing 1588230740/info: creating writer at 1733401871864 (+1 ms)Flushing 1588230740/info: appending metadata at 1733401871895 (+31 ms)Flushing 1588230740/info: closing flushed file at 1733401871895Flushing 1588230740/ns: creating writer at 1733401871922 (+27 ms)Flushing 1588230740/ns: appending metadata at 1733401871939 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733401871939Flushing 1588230740/table: creating writer at 1733401871960 (+21 ms)Flushing 1588230740/table: appending metadata at 1733401871981 (+21 ms)Flushing 1588230740/table: closing flushed file at 1733401871981Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42933735: reopening flushed file at 1733401872011 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9c60376: reopening flushed file at 1733401872022 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@483c686b: reopening flushed file at 1733401872033 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 188ms, sequenceid=11, compaction requested=false at 1733401872046 (+13 ms)Writing region close event to WAL at 1733401872048 (+2 ms)Running coprocessor post-close hooks at 1733401872056 (+8 ms)Closed at 1733401872056 2024-12-05T12:31:12,057 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T12:31:12,058 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(976): stopping server 2556e7014d8b,45411,1733401868196; all regions closed. 2024-12-05T12:31:12,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_1073741829_1019 (size=2751) 2024-12-05T12:31:12,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741829_1019 (size=2751) 2024-12-05T12:31:12,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741829_1019 (size=2751) 2024-12-05T12:31:12,065 DEBUG [RS:2;2556e7014d8b:45411 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/oldWALs 2024-12-05T12:31:12,065 INFO [RS:2;2556e7014d8b:45411 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2556e7014d8b%2C45411%2C1733401868196.meta:.meta(num 1733401870033) 2024-12-05T12:31:12,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741826_1016 (size=1298) 2024-12-05T12:31:12,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741826_1016 (size=1298) 2024-12-05T12:31:12,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_1073741826_1016 (size=1298) 2024-12-05T12:31:12,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-05T12:31:12,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-05T12:31:12,072 DEBUG [RS:2;2556e7014d8b:45411 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/oldWALs 2024-12-05T12:31:12,072 INFO [RS:2;2556e7014d8b:45411 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2556e7014d8b%2C45411%2C1733401868196:(num 1733401869522) 2024-12-05T12:31:12,072 DEBUG [RS:2;2556e7014d8b:45411 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:12,072 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:31:12,072 INFO [RS:2;2556e7014d8b:45411 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T12:31:12,072 INFO [RS:2;2556e7014d8b:45411 {}] hbase.ChoreService(370): Chore service for: regionserver/2556e7014d8b:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-05T12:31:12,073 INFO [RS:2;2556e7014d8b:45411 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T12:31:12,073 INFO [regionserver/2556e7014d8b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T12:31:12,073 INFO [RS:2;2556e7014d8b:45411 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45411 2024-12-05T12:31:12,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-05T12:31:12,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-05T12:31:12,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2556e7014d8b,45411,1733401868196 2024-12-05T12:31:12,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T12:31:12,076 INFO [RS:2;2556e7014d8b:45411 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T12:31:12,077 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2556e7014d8b,45411,1733401868196] 2024-12-05T12:31:12,079 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2556e7014d8b,45411,1733401868196 already deleted, retry=false 2024-12-05T12:31:12,079 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2556e7014d8b,45411,1733401868196 expired; onlineServers=0 2024-12-05T12:31:12,079 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2556e7014d8b,36431,1733401867325' ***** 2024-12-05T12:31:12,079 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T12:31:12,079 INFO [M:0;2556e7014d8b:36431 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T12:31:12,079 INFO [M:0;2556e7014d8b:36431 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T12:31:12,079 DEBUG [M:0;2556e7014d8b:36431 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T12:31:12,079 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-05T12:31:12,080 DEBUG [M:0;2556e7014d8b:36431 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T12:31:12,080 DEBUG [master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.large.0-1733401869207 {}] cleaner.HFileCleaner(306): Exit Thread[master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.large.0-1733401869207,5,FailOnTimeoutGroup] 2024-12-05T12:31:12,080 DEBUG [master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.small.0-1733401869208 {}] cleaner.HFileCleaner(306): Exit Thread[master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.small.0-1733401869208,5,FailOnTimeoutGroup] 2024-12-05T12:31:12,080 INFO [M:0;2556e7014d8b:36431 {}] hbase.ChoreService(370): Chore service for: master/2556e7014d8b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T12:31:12,080 INFO [M:0;2556e7014d8b:36431 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T12:31:12,080 DEBUG [M:0;2556e7014d8b:36431 {}] master.HMaster(1795): Stopping service threads 2024-12-05T12:31:12,080 INFO [M:0;2556e7014d8b:36431 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T12:31:12,081 INFO [M:0;2556e7014d8b:36431 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T12:31:12,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T12:31:12,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:12,081 INFO [M:0;2556e7014d8b:36431 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T12:31:12,081 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T12:31:12,082 DEBUG [M:0;2556e7014d8b:36431 {}] zookeeper.ZKUtil(347): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T12:31:12,082 WARN [M:0;2556e7014d8b:36431 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T12:31:12,083 INFO [M:0;2556e7014d8b:36431 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/.lastflushedseqids 2024-12-05T12:31:12,091 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:12,091 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-05T12:31:12,096 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:41370 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:33847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41370 dst: /127.0.0.1:33847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:12,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-05T12:31:12,100 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T12:31:12,100 INFO [M:0;2556e7014d8b:36431 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T12:31:12,100 INFO [M:0;2556e7014d8b:36431 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T12:31:12,101 DEBUG [M:0;2556e7014d8b:36431 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T12:31:12,101 INFO [M:0;2556e7014d8b:36431 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:12,101 DEBUG [M:0;2556e7014d8b:36431 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:12,101 DEBUG [M:0;2556e7014d8b:36431 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T12:31:12,101 DEBUG [M:0;2556e7014d8b:36431 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T12:31:12,101 INFO [M:0;2556e7014d8b:36431 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-05T12:31:12,119 DEBUG [M:0;2556e7014d8b:36431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2b93ce2239324cfe8cc510d6e54239b0 is 82, key is hbase:meta,,1/info:regioninfo/1733401870159/Put/seqid=0 2024-12-05T12:31:12,121 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:12,121 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:12,124 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:41394 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:33847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41394 dst: /127.0.0.1:33847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:12,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-05T12:31:12,129 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T12:31:12,129 INFO [M:0;2556e7014d8b:36431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2b93ce2239324cfe8cc510d6e54239b0 2024-12-05T12:31:12,153 DEBUG [M:0;2556e7014d8b:36431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb4d8a73032449ba838a63d7b0cf4fe1 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733401870976/Put/seqid=0 2024-12-05T12:31:12,155 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:12,155 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:12,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:41408 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:33847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41408 dst: /127.0.0.1:33847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:12,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775552_1037 (size=6438) 2024-12-05T12:31:12,162 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T12:31:12,163 INFO [M:0;2556e7014d8b:36431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb4d8a73032449ba838a63d7b0cf4fe1 2024-12-05T12:31:12,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:31:12,177 INFO [RS:2;2556e7014d8b:45411 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T12:31:12,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45411-0x100aa6b7be20003, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:31:12,177 INFO [RS:2;2556e7014d8b:45411 {}] regionserver.HRegionServer(1031): Exiting; stopping=2556e7014d8b,45411,1733401868196; zookeeper connection closed. 2024-12-05T12:31:12,178 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@228ef532 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@228ef532 2024-12-05T12:31:12,178 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T12:31:12,187 DEBUG [M:0;2556e7014d8b:36431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4aefee335417400aa8df7d37a40d2886 is 69, key is 2556e7014d8b,38835,1733401868016/rs:state/1733401869242/Put/seqid=0 2024-12-05T12:31:12,189 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:12,189 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T12:31:12,191 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_666647316_22 at /127.0.0.1:41424 [Receiving block BP-743128014-172.17.0.2-1733401864075:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41424 dst: /127.0.0.1:33847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:31:12,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-05T12:31:12,596 WARN [M:0;2556e7014d8b:36431 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T12:31:12,596 INFO [M:0;2556e7014d8b:36431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4aefee335417400aa8df7d37a40d2886 2024-12-05T12:31:12,606 DEBUG [M:0;2556e7014d8b:36431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2b93ce2239324cfe8cc510d6e54239b0 as hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2b93ce2239324cfe8cc510d6e54239b0 2024-12-05T12:31:12,614 INFO [M:0;2556e7014d8b:36431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2b93ce2239324cfe8cc510d6e54239b0, entries=8, sequenceid=72, filesize=5.5 K 2024-12-05T12:31:12,615 DEBUG [M:0;2556e7014d8b:36431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb4d8a73032449ba838a63d7b0cf4fe1 as hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fb4d8a73032449ba838a63d7b0cf4fe1 2024-12-05T12:31:12,622 INFO [M:0;2556e7014d8b:36431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fb4d8a73032449ba838a63d7b0cf4fe1, entries=8, sequenceid=72, filesize=6.3 K 2024-12-05T12:31:12,623 DEBUG [M:0;2556e7014d8b:36431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4aefee335417400aa8df7d37a40d2886 as hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4aefee335417400aa8df7d37a40d2886 2024-12-05T12:31:12,630 INFO [M:0;2556e7014d8b:36431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4aefee335417400aa8df7d37a40d2886, entries=3, sequenceid=72, filesize=5.2 K 2024-12-05T12:31:12,631 INFO [M:0;2556e7014d8b:36431 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 530ms, sequenceid=72, compaction requested=false 2024-12-05T12:31:12,633 INFO [M:0;2556e7014d8b:36431 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:12,633 DEBUG [M:0;2556e7014d8b:36431 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733401872101Disabling compacts and flushes for region at 1733401872101Disabling writes for close at 1733401872101Obtaining lock to block concurrent updates at 1733401872101Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733401872101Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733401872102 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733401872102Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733401872102Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733401872119 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733401872119Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733401872136 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733401872153 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733401872153Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733401872170 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733401872186 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733401872186Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27521282: reopening flushed file at 1733401872605 (+419 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1125c24b: reopening flushed file at 1733401872614 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fa59e58: reopening flushed file at 1733401872622 (+8 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 530ms, sequenceid=72, compaction requested=false at 1733401872631 (+9 ms)Writing region close event to WAL at 1733401872633 (+2 ms)Closed at 1733401872633 2024-12-05T12:31:12,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37519 is added to blk_1073741825_1011 (size=32662) 2024-12-05T12:31:12,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741825_1011 (size=32662) 2024-12-05T12:31:12,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741825_1011 (size=32662) 2024-12-05T12:31:12,637 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T12:31:12,637 INFO [M:0;2556e7014d8b:36431 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T12:31:12,637 INFO [M:0;2556e7014d8b:36431 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36431 2024-12-05T12:31:12,638 INFO [M:0;2556e7014d8b:36431 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T12:31:12,741 INFO [M:0;2556e7014d8b:36431 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T12:31:12,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:31:12,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36431-0x100aa6b7be20000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:31:12,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T12:31:12,747 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T12:31:12,748 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T12:31:12,748 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T12:31:12,748 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir/,STOPPED} 2024-12-05T12:31:12,750 WARN [BP-743128014-172.17.0.2-1733401864075 heartbeating to localhost/127.0.0.1:43397 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T12:31:12,750 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T12:31:12,750 WARN [BP-743128014-172.17.0.2-1733401864075 heartbeating to localhost/127.0.0.1:43397 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-743128014-172.17.0.2-1733401864075 (Datanode Uuid 60eee8e3-a461-4da9-8a30-3048fa8df334) service to localhost/127.0.0.1:43397 2024-12-05T12:31:12,751 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T12:31:12,752 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data5/current/BP-743128014-172.17.0.2-1733401864075 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T12:31:12,752 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data6/current/BP-743128014-172.17.0.2-1733401864075 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T12:31:12,753 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T12:31:12,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T12:31:12,755 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T12:31:12,755 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T12:31:12,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T12:31:12,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir/,STOPPED} 2024-12-05T12:31:12,756 WARN [BP-743128014-172.17.0.2-1733401864075 heartbeating to localhost/127.0.0.1:43397 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T12:31:12,756 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T12:31:12,757 WARN [BP-743128014-172.17.0.2-1733401864075 heartbeating to localhost/127.0.0.1:43397 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-743128014-172.17.0.2-1733401864075 (Datanode Uuid ad7e54f4-e677-4555-844c-f46253668bca) service to localhost/127.0.0.1:43397 2024-12-05T12:31:12,757 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T12:31:12,757 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data3/current/BP-743128014-172.17.0.2-1733401864075 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T12:31:12,757 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data4/current/BP-743128014-172.17.0.2-1733401864075 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T12:31:12,758 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T12:31:12,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T12:31:12,760 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T12:31:12,760 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T12:31:12,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T12:31:12,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir/,STOPPED} 2024-12-05T12:31:12,762 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T12:31:12,762 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T12:31:12,762 WARN [BP-743128014-172.17.0.2-1733401864075 heartbeating to localhost/127.0.0.1:43397 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T12:31:12,762 WARN [BP-743128014-172.17.0.2-1733401864075 heartbeating to localhost/127.0.0.1:43397 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-743128014-172.17.0.2-1733401864075 (Datanode Uuid a86eef30-d633-403a-91d3-5d57c0f5ef07) service to localhost/127.0.0.1:43397
2024-12-05T12:31:12,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data1/current/BP-743128014-172.17.0.2-1733401864075 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:31:12,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/cluster_6fabe550-8386-ad18-9416-42cb828a3334/data/data2/current/BP-743128014-172.17.0.2-1733401864075 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:31:12,763 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T12:31:12,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T12:31:12,771 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T12:31:12,771 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T12:31:12,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T12:31:12,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir/,STOPPED}
2024-12-05T12:31:12,780 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-05T12:31:12,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-05T12:31:12,818 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=83 (was 157), OpenFileDescriptor=437 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=363 (was 365), ProcessCount=11 (was 11), AvailableMemoryMB=3038 (was 3309)
2024-12-05T12:31:12,824 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=83, OpenFileDescriptor=437, MaxFileDescriptor=1048576, SystemLoadAverage=363, ProcessCount=11, AvailableMemoryMB=3038
2024-12-05T12:31:12,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-05T12:31:12,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.log.dir so I do NOT create it in target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91
2024-12-05T12:31:12,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c851a924-2bfd-c3de-63d3-d5d224b9f115/hadoop.tmp.dir so I do NOT create it in target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91
2024-12-05T12:31:12,825 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e, deleteOnExit=true
2024-12-05T12:31:12,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-05T12:31:12,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/test.cache.data in system properties and HBase conf
2024-12-05T12:31:12,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.tmp.dir in system properties and HBase conf
2024-12-05T12:31:12,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.log.dir in system properties and HBase conf
2024-12-05T12:31:12,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-05T12:31:12,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-05T12:31:12,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-05T12:31:12,826 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-05T12:31:12,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-05T12:31:12,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-05T12:31:12,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-05T12:31:12,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T12:31:12,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-05T12:31:12,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-05T12:31:12,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T12:31:12,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T12:31:12,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-05T12:31:12,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/nfs.dump.dir in system properties and HBase conf
2024-12-05T12:31:12,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/java.io.tmpdir in system properties and HBase conf
2024-12-05T12:31:12,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T12:31:12,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-05T12:31:12,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-05T12:31:12,923 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:12,928 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:31:12,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:31:12,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:31:12,929 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T12:31:12,930 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:12,930 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41ab5cc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:31:12,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28bcd4bd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:31:13,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4453029d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/java.io.tmpdir/jetty-localhost-45729-hadoop-hdfs-3_4_1-tests_jar-_-any-6667079848641533048/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T12:31:13,048 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ad015a3{HTTP/1.1, (http/1.1)}{localhost:45729}
2024-12-05T12:31:13,048 INFO [Time-limited test {}] server.Server(415): Started @11043ms
2024-12-05T12:31:13,128 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:13,133 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:31:13,133 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:31:13,133 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:31:13,133 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T12:31:13,134 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c7a8992{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:31:13,135 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c4ebd49{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:31:13,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@56aa9d3b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/java.io.tmpdir/jetty-localhost-44625-hadoop-hdfs-3_4_1-tests_jar-_-any-15048579824076322036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:31:13,258 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f6e4afc{HTTP/1.1, (http/1.1)}{localhost:44625}
2024-12-05T12:31:13,258 INFO [Time-limited test {}] server.Server(415): Started @11253ms
2024-12-05T12:31:13,259 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T12:31:13,299 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:13,304 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:31:13,304 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:31:13,304 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:31:13,304 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-05T12:31:13,305 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@505d9ca3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:31:13,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b4148d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:31:13,352 WARN [Thread-530 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data1/current/BP-242852367-172.17.0.2-1733401872864/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:13,352 WARN [Thread-531 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data2/current/BP-242852367-172.17.0.2-1733401872864/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:13,372 WARN [Thread-509 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-05T12:31:13,376 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26dec2d39331371 with lease ID 0x71eed697a969fb66: Processing first storage report for DS-07a97c45-42a2-4e30-8960-3cc593e2136a from datanode DatanodeRegistration(127.0.0.1:33583, datanodeUuid=7f7d1356-0304-40ec-b960-181a1ed8d933, infoPort=45779, infoSecurePort=0, ipcPort=44393, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864)
2024-12-05T12:31:13,376 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26dec2d39331371 with lease ID 0x71eed697a969fb66: from storage DS-07a97c45-42a2-4e30-8960-3cc593e2136a node DatanodeRegistration(127.0.0.1:33583, datanodeUuid=7f7d1356-0304-40ec-b960-181a1ed8d933, infoPort=45779, infoSecurePort=0, ipcPort=44393, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:31:13,376 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26dec2d39331371 with lease ID 0x71eed697a969fb66: Processing first storage report for DS-8242a40d-9f79-4b36-b592-8b22fe3ac45f from datanode DatanodeRegistration(127.0.0.1:33583, datanodeUuid=7f7d1356-0304-40ec-b960-181a1ed8d933, infoPort=45779, infoSecurePort=0, ipcPort=44393, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864)
2024-12-05T12:31:13,376 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26dec2d39331371 with lease ID 0x71eed697a969fb66: from storage DS-8242a40d-9f79-4b36-b592-8b22fe3ac45f node DatanodeRegistration(127.0.0.1:33583, datanodeUuid=7f7d1356-0304-40ec-b960-181a1ed8d933, infoPort=45779, infoSecurePort=0, ipcPort=44393, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:31:13,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@13173478{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/java.io.tmpdir/jetty-localhost-37777-hadoop-hdfs-3_4_1-tests_jar-_-any-1556425242439819820/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:31:13,457 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45fbb531{HTTP/1.1, (http/1.1)}{localhost:37777}
2024-12-05T12:31:13,457 INFO [Time-limited test {}] server.Server(415): Started @11452ms
2024-12-05T12:31:13,459 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T12:31:13,490 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:31:13,493 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:31:13,493 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:31:13,493 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:31:13,493 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T12:31:13,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@660b8bbc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:31:13,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44968fad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:31:13,541 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data3/current/BP-242852367-172.17.0.2-1733401872864/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:13,541 WARN [Thread-566 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data4/current/BP-242852367-172.17.0.2-1733401872864/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:13,565 WARN [Thread-545 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-05T12:31:13,569 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1149b4cbe6894d52 with lease ID 0x71eed697a969fb67: Processing first storage report for DS-a6a277c0-edef-4439-b58a-e8552f1870c6 from datanode DatanodeRegistration(127.0.0.1:35315, datanodeUuid=8fc6ae7d-54f0-4a3f-b91c-2c549f9a008d, infoPort=38251, infoSecurePort=0, ipcPort=42725, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864)
2024-12-05T12:31:13,569 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1149b4cbe6894d52 with lease ID 0x71eed697a969fb67: from storage DS-a6a277c0-edef-4439-b58a-e8552f1870c6 node DatanodeRegistration(127.0.0.1:35315, datanodeUuid=8fc6ae7d-54f0-4a3f-b91c-2c549f9a008d, infoPort=38251, infoSecurePort=0, ipcPort=42725, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:31:13,569 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1149b4cbe6894d52 with lease ID 0x71eed697a969fb67: Processing first storage report for DS-c9f171a1-5d6b-4c29-aaaa-bbf81c365caa from datanode DatanodeRegistration(127.0.0.1:35315, datanodeUuid=8fc6ae7d-54f0-4a3f-b91c-2c549f9a008d, infoPort=38251, infoSecurePort=0, ipcPort=42725, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864)
2024-12-05T12:31:13,569 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1149b4cbe6894d52 with lease ID 0x71eed697a969fb67: from storage DS-c9f171a1-5d6b-4c29-aaaa-bbf81c365caa node DatanodeRegistration(127.0.0.1:35315, datanodeUuid=8fc6ae7d-54f0-4a3f-b91c-2c549f9a008d, infoPort=38251, infoSecurePort=0, ipcPort=42725, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:31:13,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ae2d238{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/java.io.tmpdir/jetty-localhost-41627-hadoop-hdfs-3_4_1-tests_jar-_-any-11849157494418616072/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:31:13,614 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@700f39d7{HTTP/1.1, (http/1.1)}{localhost:41627}
2024-12-05T12:31:13,614 INFO [Time-limited test {}] server.Server(415): Started @11610ms
2024-12-05T12:31:13,616 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T12:31:13,699 WARN [Thread-592 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data6/current/BP-242852367-172.17.0.2-1733401872864/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:13,699 WARN [Thread-591 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data5/current/BP-242852367-172.17.0.2-1733401872864/current, will proceed with Du for space computation calculation,
2024-12-05T12:31:13,725 WARN [Thread-580 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-05T12:31:13,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x90139414b2a9977 with lease ID 0x71eed697a969fb68: Processing first storage report for DS-f85c756a-0203-4376-a98b-54d68347ee99 from datanode DatanodeRegistration(127.0.0.1:36987, datanodeUuid=a8066a58-2c59-4809-8a1a-e701dfebdb58, infoPort=43705, infoSecurePort=0, ipcPort=45689, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864)
2024-12-05T12:31:13,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x90139414b2a9977 with lease ID 0x71eed697a969fb68: from storage DS-f85c756a-0203-4376-a98b-54d68347ee99 node DatanodeRegistration(127.0.0.1:36987, datanodeUuid=a8066a58-2c59-4809-8a1a-e701dfebdb58, infoPort=43705, infoSecurePort=0, ipcPort=45689, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:31:13,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x90139414b2a9977 with lease ID 0x71eed697a969fb68: Processing first storage report for DS-1a13ec9a-e3dc-4bf0-bec4-eb74770436bc from datanode DatanodeRegistration(127.0.0.1:36987, datanodeUuid=a8066a58-2c59-4809-8a1a-e701dfebdb58, infoPort=43705, infoSecurePort=0, ipcPort=45689, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864)
2024-12-05T12:31:13,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x90139414b2a9977 with lease ID 0x71eed697a969fb68: from storage DS-1a13ec9a-e3dc-4bf0-bec4-eb74770436bc node DatanodeRegistration(127.0.0.1:36987, datanodeUuid=a8066a58-2c59-4809-8a1a-e701dfebdb58, infoPort=43705, infoSecurePort=0, ipcPort=45689, storageInfo=lv=-57;cid=testClusterID;nsid=1262415533;c=1733401872864), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:31:13,743 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91
2024-12-05T12:31:13,745 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/zookeeper_0, clientPort=53472, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-05T12:31:13,746 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53472
2024-12-05T12:31:13,747 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,749 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741825_1001 (size=7)
2024-12-05T12:31:13,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741825_1001 (size=7)
2024-12-05T12:31:13,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741825_1001 (size=7)
2024-12-05T12:31:13,769 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6 with version=8
2024-12-05T12:31:13,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43397/user/jenkins/test-data/fce5c5b9-875f-c1e5-2a11-e470edf798df/hbase-staging
2024-12-05T12:31:13,771 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2556e7014d8b:0 server-side Connection retries=45
2024-12-05T12:31:13,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,771 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T12:31:13,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T12:31:13,772 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-05T12:31:13,772 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T12:31:13,772 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46829
2024-12-05T12:31:13,774 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46829 connecting to ZooKeeper ensemble=127.0.0.1:53472
2024-12-05T12:31:13,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:468290x0, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T12:31:13,780 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46829-0x100aa6b97ef0000 connected
2024-12-05T12:31:13,794 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,795 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,797 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:31:13,797 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6, hbase.cluster.distributed=false
2024-12-05T12:31:13,799 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T12:31:13,799 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46829
2024-12-05T12:31:13,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46829
2024-12-05T12:31:13,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46829
2024-12-05T12:31:13,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46829
2024-12-05T12:31:13,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46829
2024-12-05T12:31:13,815 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2556e7014d8b:0 server-side Connection retries=45
2024-12-05T12:31:13,816 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,816 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,816 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T12:31:13,816 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,816 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T12:31:13,816 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-05T12:31:13,816 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T12:31:13,817 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45367
2024-12-05T12:31:13,818 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45367 connecting to ZooKeeper ensemble=127.0.0.1:53472
2024-12-05T12:31:13,819 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,821 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:453670x0, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T12:31:13,825 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45367-0x100aa6b97ef0001 connected
2024-12-05T12:31:13,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:31:13,825 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-05T12:31:13,826 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-05T12:31:13,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-05T12:31:13,828 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T12:31:13,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45367
2024-12-05T12:31:13,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45367
2024-12-05T12:31:13,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45367
2024-12-05T12:31:13,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45367
2024-12-05T12:31:13,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45367
2024-12-05T12:31:13,844 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2556e7014d8b:0 server-side Connection retries=45
2024-12-05T12:31:13,844 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,844 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,844 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T12:31:13,844 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,844 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T12:31:13,844 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-05T12:31:13,844 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T12:31:13,845 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45739
2024-12-05T12:31:13,846 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45739 connecting to ZooKeeper ensemble=127.0.0.1:53472
2024-12-05T12:31:13,846 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,848 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457390x0, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T12:31:13,853 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:457390x0, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:31:13,853 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45739-0x100aa6b97ef0002 connected
2024-12-05T12:31:13,853 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-05T12:31:13,854 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-05T12:31:13,855 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-05T12:31:13,856 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T12:31:13,857 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45739
2024-12-05T12:31:13,857 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45739
2024-12-05T12:31:13,857 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45739
2024-12-05T12:31:13,858 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45739
2024-12-05T12:31:13,858 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45739
2024-12-05T12:31:13,880 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2556e7014d8b:0 server-side Connection retries=45
2024-12-05T12:31:13,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,880 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T12:31:13,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:31:13,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T12:31:13,880 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-05T12:31:13,881 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T12:31:13,881 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46755
2024-12-05T12:31:13,883 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46755 connecting to ZooKeeper ensemble=127.0.0.1:53472
2024-12-05T12:31:13,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,886 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467550x0, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T12:31:13,891 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:467550x0, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:31:13,891 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46755-0x100aa6b97ef0003 connected
2024-12-05T12:31:13,891 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-05T12:31:13,892 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-05T12:31:13,892 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-05T12:31:13,894 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T12:31:13,896 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46755
2024-12-05T12:31:13,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46755
2024-12-05T12:31:13,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46755
2024-12-05T12:31:13,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46755
2024-12-05T12:31:13,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46755
2024-12-05T12:31:13,915 DEBUG [M:0;2556e7014d8b:46829 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2556e7014d8b:46829
2024-12-05T12:31:13,915 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2556e7014d8b,46829,1733401873771
2024-12-05T12:31:13,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:31:13,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:31:13,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:31:13,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:31:13,919 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2556e7014d8b,46829,1733401873771
2024-12-05T12:31:13,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-05T12:31:13,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-05T12:31:13,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T12:31:13,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-05T12:31:13,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T12:31:13,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T12:31:13,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T12:31:13,922 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-05T12:31:13,923 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2556e7014d8b,46829,1733401873771 from backup master directory
2024-12-05T12:31:13,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2556e7014d8b,46829,1733401873771
2024-12-05T12:31:13,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:31:13,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:31:13,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:31:13,925 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-05T12:31:13,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:31:13,925 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2556e7014d8b,46829,1733401873771
2024-12-05T12:31:13,932 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/hbase.id] with ID: 0ffa628f-af30-44bb-8653-a3abcd8cf0ec
2024-12-05T12:31:13,932 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/.tmp/hbase.id
2024-12-05T12:31:13,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741826_1002 (size=42)
2024-12-05T12:31:13,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741826_1002 (size=42)
2024-12-05T12:31:13,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741826_1002 (size=42)
2024-12-05T12:31:13,942 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/.tmp/hbase.id]:[hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/hbase.id]
2024-12-05T12:31:13,960 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:31:13,960 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-05T12:31:13,963 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms.
2024-12-05T12:31:13,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:13,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:13,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:13,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:13,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741827_1003 (size=196) 2024-12-05T12:31:13,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741827_1003 (size=196) 2024-12-05T12:31:13,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741827_1003 (size=196) 2024-12-05T12:31:13,981 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:31:13,982 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T12:31:13,982 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T12:31:13,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is 
added to blk_1073741828_1004 (size=1189) 2024-12-05T12:31:13,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741828_1004 (size=1189) 2024-12-05T12:31:13,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741828_1004 (size=1189) 2024-12-05T12:31:14,001 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store 2024-12-05T12:31:14,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741829_1005 (size=34) 2024-12-05T12:31:14,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741829_1005 (size=34) 2024-12-05T12:31:14,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741829_1005 (size=34) 2024-12-05T12:31:14,011 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:14,011 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T12:31:14,011 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:14,011 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
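The master:store descriptor printed above is a table descriptor with four column families (info, proc, rs, state) and per-family settings such as VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE. A hedged sketch of how an equivalent descriptor could be assembled with the public HBase client builders, shown for a hypothetical user table named "demo" rather than the internal master region:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
    public static TableDescriptor build() {
        // Hypothetical table; settings mirror the 'info' and 'proc' families in the log.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8KB
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                     // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
    }
}
```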
2024-12-05T12:31:14,011 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T12:31:14,011 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:14,011 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:31:14,011 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733401874011Disabling compacts and flushes for region at 1733401874011Disabling writes for close at 1733401874011Writing region close event to WAL at 1733401874011Closed at 1733401874011 2024-12-05T12:31:14,013 WARN [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/.initializing 2024-12-05T12:31:14,013 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/WALs/2556e7014d8b,46829,1733401873771 2024-12-05T12:31:14,017 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C46829%2C1733401873771, suffix=, logDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/WALs/2556e7014d8b,46829,1733401873771, archiveDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/oldWALs, maxLogs=10 2024-12-05T12:31:14,018 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2556e7014d8b%2C46829%2C1733401873771.1733401874017 2024-12-05T12:31:14,031 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/WALs/2556e7014d8b,46829,1733401873771/2556e7014d8b%2C46829%2C1733401873771.1733401874017 2024-12-05T12:31:14,034 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43705:43705),(127.0.0.1/127.0.0.1:45779:45779),(127.0.0.1/127.0.0.1:38251:38251)] 2024-12-05T12:31:14,038 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:31:14,039 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:14,039 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,039 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T12:31:14,042 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:14,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T12:31:14,045 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:31:14,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T12:31:14,048 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,049 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:31:14,049 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T12:31:14,051 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:31:14,051 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,052 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,053 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,055 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,055 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,055 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T12:31:14,057 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:31:14,060 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:31:14,061 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61960895, jitterRate=-0.07671071588993073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T12:31:14,062 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733401874039Initializing all the Stores at 1733401874040 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401874040Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401874040Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401874040Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401874040Cleaning up temporary data from old regions at 1733401874055 (+15 ms)Region opened successfully at 1733401874062 (+7 ms) 2024-12-05T12:31:14,064 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T12:31:14,069 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ee825b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2556e7014d8b/172.17.0.2:0 2024-12-05T12:31:14,070 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T12:31:14,070 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T12:31:14,070 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T12:31:14,070 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T12:31:14,071 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T12:31:14,071 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T12:31:14,071 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T12:31:14,074 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
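The AbstractFSWAL line above reports blocksize=256 MB, rollsize=128 MB and maxLogs=10 for the master's local WAL. The following sketch reads the configuration keys such values are usually driven by; the key names and the 0.5 roll multiplier are assumptions based on standard HBase settings, and the fallback values passed here simply echo the numbers printed in this log rather than the shipped defaults:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed relationship: roll size = WAL block size * roll multiplier (256 MB * 0.5 = 128 MB).
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 10);
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n",
            blockSize, (long) (blockSize * multiplier), maxLogs);
    }
}
```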
2024-12-05T12:31:14,075 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T12:31:14,076 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T12:31:14,076 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T12:31:14,077 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T12:31:14,078 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T12:31:14,079 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T12:31:14,079 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T12:31:14,080 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T12:31:14,082 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T12:31:14,083 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T12:31:14,086 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T12:31:14,087 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T12:31:14,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:14,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:14,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:14,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-05T12:31:14,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:14,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,091 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2556e7014d8b,46829,1733401873771, sessionid=0x100aa6b97ef0000, setting cluster-up flag (Was=false) 2024-12-05T12:31:14,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,101 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T12:31:14,102 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2556e7014d8b,46829,1733401873771 2024-12-05T12:31:14,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,111 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T12:31:14,112 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2556e7014d8b,46829,1733401873771 2024-12-05T12:31:14,114 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T12:31:14,117 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T12:31:14,117 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T12:31:14,117 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
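Several ZKUtil lines above read optional znodes such as /hbase/balancer, /hbase/normalizer and /hbase/switch/split, and explicitly note that a missing node is "not necessarily an error". A minimal ZooKeeper sketch of that read-if-present pattern, using the quorum address from this log and treating NoNodeException as "fall back to the default":

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class OptionalZNodeSketch {
    public static void main(String[] args) throws Exception {
        // Quorum taken from the log; the watcher here intentionally ignores events.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53472", 30000, (WatchedEvent e) -> { });
        try {
            byte[] data = zk.getData("/hbase/balancer", false, null);
            System.out.println("balancer znode present, " + data.length + " bytes");
        } catch (KeeperException.NoNodeException absent) {
            // Absence is a normal state: the caller keeps its built-in default.
            System.out.println("/hbase/balancer not set, using default");
        } finally {
            zk.close();
        }
    }
}
```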
2024-12-05T12:31:14,117 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2556e7014d8b,46829,1733401873771 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T12:31:14,119 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2556e7014d8b:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:31:14,119 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2556e7014d8b:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:31:14,119 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2556e7014d8b:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:31:14,119 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2556e7014d8b:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:31:14,119 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2556e7014d8b:0, corePoolSize=10, maxPoolSize=10 2024-12-05T12:31:14,119 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,119 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2556e7014d8b:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:31:14,119 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,122 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733401904122 2024-12-05T12:31:14,122 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T12:31:14,122 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T12:31:14,122 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T12:31:14,122 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T12:31:14,122 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T12:31:14,122 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T12:31:14,123 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,124 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T12:31:14,124 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T12:31:14,125 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T12:31:14,125 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T12:31:14,125 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T12:31:14,125 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T12:31:14,125 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T12:31:14,125 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.large.0-1733401874125,5,FailOnTimeoutGroup] 2024-12-05T12:31:14,126 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.small.0-1733401874125,5,FailOnTimeoutGroup] 2024-12-05T12:31:14,126 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,126 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T12:31:14,126 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,126 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
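The ChoreService lines above schedule periodic chores (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, and so on). As a plain-JDK analogy for a periodic cleaner chore, deliberately not the HBase ChoreService API itself and with a much shorter period so the run is observable:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreAnalogy {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // The real LogsCleaner period is 600000 ms; 2 seconds here just to see it fire.
        pool.scheduleAtFixedRate(
            () -> System.out.println("scanning oldWALs for deletable files..."),
            0, 2, TimeUnit.SECONDS);
        TimeUnit.SECONDS.sleep(6);   // let the chore run a few times, then stop
        pool.shutdown();
    }
}
```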
2024-12-05T12:31:14,127 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,127 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T12:31:14,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741831_1007 (size=1321) 2024-12-05T12:31:14,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741831_1007 (size=1321) 2024-12-05T12:31:14,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741831_1007 (size=1321) 2024-12-05T12:31:14,145 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T12:31:14,145 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6 2024-12-05T12:31:14,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741832_1008 (size=32) 2024-12-05T12:31:14,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741832_1008 (size=32) 2024-12-05T12:31:14,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741832_1008 (size=32) 2024-12-05T12:31:14,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:14,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T12:31:14,164 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T12:31:14,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:14,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T12:31:14,166 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T12:31:14,166 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:14,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T12:31:14,168 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T12:31:14,168 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:14,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T12:31:14,171 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T12:31:14,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:14,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T12:31:14,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740 2024-12-05T12:31:14,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740 2024-12-05T12:31:14,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T12:31:14,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T12:31:14,176 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T12:31:14,178 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T12:31:14,181 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:31:14,181 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63921114, jitterRate=-0.047501176595687866}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T12:31:14,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733401874161Initializing all the Stores at 1733401874162 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401874162Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401874162Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401874162Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401874162Cleaning up temporary data from old regions at 1733401874175 (+13 ms)Region opened successfully at 1733401874182 (+7 ms) 2024-12-05T12:31:14,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T12:31:14,183 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T12:31:14,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T12:31:14,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T12:31:14,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T12:31:14,183 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T12:31:14,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733401874183Disabling compacts and flushes for region at 1733401874183Disabling writes for close at 1733401874183Writing region close event to WAL at 1733401874183Closed at 1733401874183 2024-12-05T12:31:14,186 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T12:31:14,186 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T12:31:14,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T12:31:14,188 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T12:31:14,189 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T12:31:14,206 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(746): ClusterId : 0ffa628f-af30-44bb-8653-a3abcd8cf0ec 2024-12-05T12:31:14,206 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(746): ClusterId : 0ffa628f-af30-44bb-8653-a3abcd8cf0ec 2024-12-05T12:31:14,206 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(746): ClusterId : 0ffa628f-af30-44bb-8653-a3abcd8cf0ec 2024-12-05T12:31:14,206 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T12:31:14,206 
DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T12:31:14,206 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T12:31:14,209 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T12:31:14,209 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T12:31:14,209 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T12:31:14,209 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T12:31:14,211 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T12:31:14,212 DEBUG [RS:2;2556e7014d8b:46755 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3692eac5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2556e7014d8b/172.17.0.2:0 2024-12-05T12:31:14,212 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T12:31:14,212 DEBUG [RS:0;2556e7014d8b:45367 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ce82a46, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2556e7014d8b/172.17.0.2:0 2024-12-05T12:31:14,216 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T12:31:14,216 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T12:31:14,218 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T12:31:14,219 DEBUG [RS:1;2556e7014d8b:45739 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7172ada0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2556e7014d8b/172.17.0.2:0 2024-12-05T12:31:14,226 DEBUG [RS:2;2556e7014d8b:46755 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2556e7014d8b:46755 2024-12-05T12:31:14,227 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T12:31:14,227 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T12:31:14,227 DEBUG [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(832): About to register with Master. 
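Each AbstractRpcClient line above prints connectTO=10000, readTO=20000, writeTO=60000 for the region servers' RPC clients. The sketch below reads the socket-timeout keys those values are commonly associated with; the key names are an assumption on my part, and the fallback values are simply the numbers taken from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcClientTimeoutsSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys, defaults echo the connectTO/readTO/writeTO values printed above.
        System.out.println("connect=" + conf.getInt("hbase.ipc.client.socket.timeout.connect", 10000));
        System.out.println("read=" + conf.getInt("hbase.ipc.client.socket.timeout.read", 20000));
        System.out.println("write=" + conf.getInt("hbase.ipc.client.socket.timeout.write", 60000));
    }
}
```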
2024-12-05T12:31:14,228 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(2659): reportForDuty to master=2556e7014d8b,46829,1733401873771 with port=46755, startcode=1733401873880 2024-12-05T12:31:14,228 DEBUG [RS:2;2556e7014d8b:46755 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T12:31:14,231 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33547, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T12:31:14,232 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46829 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2556e7014d8b,46755,1733401873880 2024-12-05T12:31:14,232 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46829 {}] master.ServerManager(517): Registering regionserver=2556e7014d8b,46755,1733401873880 2024-12-05T12:31:14,233 DEBUG [RS:1;2556e7014d8b:45739 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2556e7014d8b:45739 2024-12-05T12:31:14,233 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T12:31:14,233 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T12:31:14,233 DEBUG [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T12:31:14,234 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(2659): reportForDuty to master=2556e7014d8b,46829,1733401873771 with port=45739, startcode=1733401873843 2024-12-05T12:31:14,234 DEBUG [RS:1;2556e7014d8b:45739 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T12:31:14,234 DEBUG [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6 2024-12-05T12:31:14,234 DEBUG [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42703 2024-12-05T12:31:14,234 DEBUG [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T12:31:14,236 DEBUG [RS:0;2556e7014d8b:45367 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2556e7014d8b:45367 2024-12-05T12:31:14,236 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T12:31:14,236 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T12:31:14,236 DEBUG [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T12:31:14,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T12:31:14,237 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(2659): reportForDuty to master=2556e7014d8b,46829,1733401873771 with port=45367, startcode=1733401873815 2024-12-05T12:31:14,237 DEBUG [RS:0;2556e7014d8b:45367 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T12:31:14,238 DEBUG [RS:2;2556e7014d8b:46755 {}] zookeeper.ZKUtil(111): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2556e7014d8b,46755,1733401873880 2024-12-05T12:31:14,238 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2556e7014d8b,46755,1733401873880] 2024-12-05T12:31:14,238 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43345, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T12:31:14,238 WARN [RS:2;2556e7014d8b:46755 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T12:31:14,238 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46829 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2556e7014d8b,45739,1733401873843 2024-12-05T12:31:14,238 INFO [RS:2;2556e7014d8b:46755 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T12:31:14,238 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46829 {}] master.ServerManager(517): Registering regionserver=2556e7014d8b,45739,1733401873843 2024-12-05T12:31:14,238 DEBUG [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,46755,1733401873880 2024-12-05T12:31:14,239 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44455, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T12:31:14,240 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46829 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2556e7014d8b,45367,1733401873815 2024-12-05T12:31:14,240 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46829 {}] master.ServerManager(517): Registering regionserver=2556e7014d8b,45367,1733401873815 2024-12-05T12:31:14,241 DEBUG [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6 2024-12-05T12:31:14,241 DEBUG [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42703 2024-12-05T12:31:14,241 DEBUG [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T12:31:14,242 DEBUG [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6 
2024-12-05T12:31:14,242 DEBUG [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42703 2024-12-05T12:31:14,242 DEBUG [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T12:31:14,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T12:31:14,248 DEBUG [RS:0;2556e7014d8b:45367 {}] zookeeper.ZKUtil(111): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2556e7014d8b,45367,1733401873815 2024-12-05T12:31:14,248 WARN [RS:0;2556e7014d8b:45367 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T12:31:14,248 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2556e7014d8b,45367,1733401873815] 2024-12-05T12:31:14,248 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2556e7014d8b,45739,1733401873843] 2024-12-05T12:31:14,249 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T12:31:14,250 DEBUG [RS:1;2556e7014d8b:45739 {}] zookeeper.ZKUtil(111): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2556e7014d8b,45739,1733401873843 2024-12-05T12:31:14,250 WARN [RS:1;2556e7014d8b:45739 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T12:31:14,250 INFO [RS:1;2556e7014d8b:45739 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T12:31:14,250 DEBUG [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,45739,1733401873843 2024-12-05T12:31:14,251 INFO [RS:0;2556e7014d8b:45367 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T12:31:14,251 DEBUG [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,45367,1733401873815 2024-12-05T12:31:14,256 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T12:31:14,263 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T12:31:14,265 INFO [RS:2;2556e7014d8b:46755 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T12:31:14,265 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:14,273 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T12:31:14,273 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T12:31:14,273 INFO [RS:1;2556e7014d8b:45739 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T12:31:14,274 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,276 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T12:31:14,277 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T12:31:14,277 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T12:31:14,277 INFO [RS:0;2556e7014d8b:45367 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T12:31:14,277 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,278 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T12:31:14,278 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:14,278 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,278 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,278 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,278 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,278 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,279 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2556e7014d8b:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:31:14,279 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,279 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,279 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,279 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,279 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,279 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,279 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:14,279 DEBUG [RS:1;2556e7014d8b:45739 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:14,280 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T12:31:14,281 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2556e7014d8b:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:14,281 DEBUG [RS:2;2556e7014d8b:46755 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:14,283 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T12:31:14,284 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:14,284 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T12:31:14,284 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,45739,1733401873843-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,284 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,284 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,46755,1733401873880-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2556e7014d8b:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2556e7014d8b:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:14,285 DEBUG [RS:0;2556e7014d8b:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:31:14,286 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,286 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,286 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,286 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,286 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:14,286 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,45367,1733401873815-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T12:31:14,300 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T12:31:14,300 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,46755,1733401873880-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,300 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,300 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.Replication(171): 2556e7014d8b,46755,1733401873880 started 2024-12-05T12:31:14,301 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T12:31:14,301 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,45739,1733401873843-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,301 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,301 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.Replication(171): 2556e7014d8b,45739,1733401873843 started 2024-12-05T12:31:14,302 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T12:31:14,303 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,45367,1733401873815-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,303 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,303 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.Replication(171): 2556e7014d8b,45367,1733401873815 started 2024-12-05T12:31:14,314 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,315 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1482): Serving as 2556e7014d8b,46755,1733401873880, RpcServer on 2556e7014d8b/172.17.0.2:46755, sessionid=0x100aa6b97ef0003 2024-12-05T12:31:14,315 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T12:31:14,315 DEBUG [RS:2;2556e7014d8b:46755 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2556e7014d8b,46755,1733401873880 2024-12-05T12:31:14,315 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,46755,1733401873880' 2024-12-05T12:31:14,315 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T12:31:14,316 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:14,316 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T12:31:14,316 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(1482): Serving as 2556e7014d8b,45739,1733401873843, RpcServer on 2556e7014d8b/172.17.0.2:45739, sessionid=0x100aa6b97ef0002 2024-12-05T12:31:14,316 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T12:31:14,316 DEBUG [RS:1;2556e7014d8b:45739 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2556e7014d8b,45739,1733401873843 2024-12-05T12:31:14,316 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,45739,1733401873843' 2024-12-05T12:31:14,316 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T12:31:14,316 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T12:31:14,316 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T12:31:14,316 DEBUG [RS:2;2556e7014d8b:46755 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2556e7014d8b,46755,1733401873880 2024-12-05T12:31:14,316 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,46755,1733401873880' 2024-12-05T12:31:14,316 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T12:31:14,317 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T12:31:14,317 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T12:31:14,317 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(1482): Serving as 2556e7014d8b,45367,1733401873815, RpcServer on 2556e7014d8b/172.17.0.2:45367, sessionid=0x100aa6b97ef0001 2024-12-05T12:31:14,317 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T12:31:14,317 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T12:31:14,317 DEBUG [RS:0;2556e7014d8b:45367 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2556e7014d8b,45367,1733401873815 2024-12-05T12:31:14,317 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,45367,1733401873815' 2024-12-05T12:31:14,317 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T12:31:14,317 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T12:31:14,317 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T12:31:14,317 DEBUG [RS:1;2556e7014d8b:45739 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2556e7014d8b,45739,1733401873843 2024-12-05T12:31:14,317 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,45739,1733401873843' 2024-12-05T12:31:14,317 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T12:31:14,317 DEBUG [RS:2;2556e7014d8b:46755 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T12:31:14,318 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T12:31:14,318 INFO [RS:2;2556e7014d8b:46755 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T12:31:14,318 INFO [RS:2;2556e7014d8b:46755 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-05T12:31:14,318 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T12:31:14,318 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T12:31:14,318 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T12:31:14,318 DEBUG [RS:0;2556e7014d8b:45367 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2556e7014d8b,45367,1733401873815 2024-12-05T12:31:14,318 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2556e7014d8b,45367,1733401873815' 2024-12-05T12:31:14,318 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T12:31:14,318 DEBUG [RS:1;2556e7014d8b:45739 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T12:31:14,318 INFO [RS:1;2556e7014d8b:45739 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T12:31:14,318 INFO [RS:1;2556e7014d8b:45739 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T12:31:14,319 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T12:31:14,319 DEBUG [RS:0;2556e7014d8b:45367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T12:31:14,319 INFO [RS:0;2556e7014d8b:45367 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T12:31:14,319 INFO [RS:0;2556e7014d8b:45367 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T12:31:14,340 WARN [2556e7014d8b:46829 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-05T12:31:14,420 INFO [RS:2;2556e7014d8b:46755 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C46755%2C1733401873880, suffix=, logDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,46755,1733401873880, archiveDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/oldWALs, maxLogs=32 2024-12-05T12:31:14,421 INFO [RS:1;2556e7014d8b:45739 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C45739%2C1733401873843, suffix=, logDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,45739,1733401873843, archiveDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/oldWALs, maxLogs=32 2024-12-05T12:31:14,422 INFO [RS:0;2556e7014d8b:45367 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C45367%2C1733401873815, suffix=, logDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,45367,1733401873815, archiveDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/oldWALs, maxLogs=32 2024-12-05T12:31:14,423 INFO [RS:2;2556e7014d8b:46755 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2556e7014d8b%2C46755%2C1733401873880.1733401874423 2024-12-05T12:31:14,424 INFO [RS:1;2556e7014d8b:45739 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2556e7014d8b%2C45739%2C1733401873843.1733401874423 2024-12-05T12:31:14,424 INFO [RS:0;2556e7014d8b:45367 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2556e7014d8b%2C45367%2C1733401873815.1733401874423 2024-12-05T12:31:14,434 INFO [RS:2;2556e7014d8b:46755 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,46755,1733401873880/2556e7014d8b%2C46755%2C1733401873880.1733401874423 2024-12-05T12:31:14,435 INFO [RS:1;2556e7014d8b:45739 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,45739,1733401873843/2556e7014d8b%2C45739%2C1733401873843.1733401874423 2024-12-05T12:31:14,436 DEBUG [RS:2;2556e7014d8b:46755 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38251:38251),(127.0.0.1/127.0.0.1:43705:43705),(127.0.0.1/127.0.0.1:45779:45779)] 2024-12-05T12:31:14,438 DEBUG [RS:1;2556e7014d8b:45739 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45779:45779),(127.0.0.1/127.0.0.1:43705:43705),(127.0.0.1/127.0.0.1:38251:38251)] 2024-12-05T12:31:14,438 INFO [RS:0;2556e7014d8b:45367 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,45367,1733401873815/2556e7014d8b%2C45367%2C1733401873815.1733401874423 2024-12-05T12:31:14,439 DEBUG [RS:0;2556e7014d8b:45367 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43705:43705),(127.0.0.1/127.0.0.1:38251:38251),(127.0.0.1/127.0.0.1:45779:45779)] 2024-12-05T12:31:14,590 DEBUG [2556e7014d8b:46829 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T12:31:14,590 DEBUG [2556e7014d8b:46829 {}] balancer.BalancerClusterState(204): Hosts are {2556e7014d8b=0} racks are {/default-rack=0} 2024-12-05T12:31:14,592 DEBUG [2556e7014d8b:46829 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:31:14,593 DEBUG [2556e7014d8b:46829 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:31:14,593 DEBUG [2556e7014d8b:46829 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:31:14,593 DEBUG [2556e7014d8b:46829 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:31:14,593 DEBUG [2556e7014d8b:46829 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:31:14,593 DEBUG [2556e7014d8b:46829 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:31:14,593 INFO [2556e7014d8b:46829 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:31:14,593 INFO [2556e7014d8b:46829 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:31:14,593 INFO [2556e7014d8b:46829 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:31:14,593 DEBUG [2556e7014d8b:46829 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:31:14,593 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2556e7014d8b,46755,1733401873880 2024-12-05T12:31:14,595 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2556e7014d8b,46755,1733401873880, state=OPENING 2024-12-05T12:31:14,597 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T12:31:14,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:14,599 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T12:31:14,599 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:14,599 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:14,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2556e7014d8b,46755,1733401873880}] 2024-12-05T12:31:14,599 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:14,599 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:14,754 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T12:31:14,755 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48631, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:31:14,760 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T12:31:14,760 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T12:31:14,763 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2556e7014d8b%2C46755%2C1733401873880.meta, suffix=.meta, logDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,46755,1733401873880, archiveDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/oldWALs, maxLogs=32 2024-12-05T12:31:14,764 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2556e7014d8b%2C46755%2C1733401873880.meta.1733401874764.meta 2024-12-05T12:31:14,771 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/WALs/2556e7014d8b,46755,1733401873880/2556e7014d8b%2C46755%2C1733401873880.meta.1733401874764.meta 2024-12-05T12:31:14,775 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38251:38251),(127.0.0.1/127.0.0.1:45779:45779),(127.0.0.1/127.0.0.1:43705:43705)] 2024-12-05T12:31:14,776 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:31:14,776 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T12:31:14,776 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T12:31:14,776 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-05T12:31:14,776 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T12:31:14,776 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:14,776 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T12:31:14,777 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T12:31:14,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T12:31:14,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T12:31:14,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:14,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T12:31:14,782 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T12:31:14,783 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:14,784 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T12:31:14,785 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T12:31:14,785 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:31:14,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T12:31:14,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T12:31:14,786 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-05T12:31:14,787 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T12:31:14,788 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740 2024-12-05T12:31:14,790 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740 2024-12-05T12:31:14,792 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T12:31:14,792 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T12:31:14,793 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T12:31:14,795 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T12:31:14,797 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60595061, jitterRate=-0.09706322848796844}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T12:31:14,797 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T12:31:14,798 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733401874777Writing region info on filesystem at 1733401874777Initializing all the Stores at 1733401874778 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401874778Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401874779 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401874780 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733401874780Cleaning up temporary data from old regions at 1733401874792 (+12 ms)Running coprocessor post-open hooks at 1733401874797 (+5 ms)Region opened successfully at 1733401874798 (+1 ms) 2024-12-05T12:31:14,800 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733401874753 2024-12-05T12:31:14,803 DEBUG [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T12:31:14,803 INFO [RS_OPEN_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T12:31:14,805 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2556e7014d8b,46755,1733401873880 2024-12-05T12:31:14,807 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2556e7014d8b,46755,1733401873880, state=OPEN 2024-12-05T12:31:14,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:31:14,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:31:14,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:31:14,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:31:14,809 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:14,809 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:14,809 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:14,809 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:31:14,809 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2556e7014d8b,46755,1733401873880 2024-12-05T12:31:14,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T12:31:14,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2556e7014d8b,46755,1733401873880 in 210 msec 2024-12-05T12:31:14,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T12:31:14,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 628 msec 2024-12-05T12:31:14,820 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T12:31:14,821 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T12:31:14,822 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:31:14,822 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2556e7014d8b,46755,1733401873880, seqNum=-1] 2024-12-05T12:31:14,823 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:31:14,825 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51941, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:31:14,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 717 msec 2024-12-05T12:31:14,835 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733401874835, completionTime=-1 2024-12-05T12:31:14,835 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T12:31:14,835 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
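The "Start fetching meta region location from registry" records above correspond to an ordinary client-side lookup of hbase:meta. The following is only an illustrative sketch of that lookup using the stock HBase client API; the class name LocateMeta and the empty-row probe are assumptions for illustration, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateMeta {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml / ZK quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Ask the registry where hbase:meta is served; the answer matches the
          // "The fetched meta region location is ..." records in the log above.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
          System.out.println("hbase:meta served by " + loc.getServerName());
        }
      }
    }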
2024-12-05T12:31:14,838 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T12:31:14,838 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733401934838 2024-12-05T12:31:14,838 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733401994838 2024-12-05T12:31:14,838 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-05T12:31:14,839 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,46829,1733401873771-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,839 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,46829,1733401873771-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,839 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,46829,1733401873771-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,839 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2556e7014d8b:46829, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,839 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,842 DEBUG [master/2556e7014d8b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T12:31:14,845 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,852 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.926sec 2024-12-05T12:31:14,852 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T12:31:14,852 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T12:31:14,852 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T12:31:14,852 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T12:31:14,853 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T12:31:14,853 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,46829,1733401873771-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-05T12:31:14,854 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,46829,1733401873771-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T12:31:14,860 DEBUG [master/2556e7014d8b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T12:31:14,860 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T12:31:14,861 INFO [master/2556e7014d8b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2556e7014d8b,46829,1733401873771-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:31:14,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54a1d71a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:31:14,907 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2556e7014d8b,46829,-1 for getting cluster id 2024-12-05T12:31:14,907 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:31:14,909 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0ffa628f-af30-44bb-8653-a3abcd8cf0ec' 2024-12-05T12:31:14,910 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:31:14,910 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0ffa628f-af30-44bb-8653-a3abcd8cf0ec" 2024-12-05T12:31:14,911 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27af5efa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:31:14,911 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2556e7014d8b,46829,-1] 2024-12-05T12:31:14,911 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:31:14,912 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:14,914 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:31:14,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ba23e7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:31:14,915 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:31:14,916 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=2556e7014d8b,46755,1733401873880, seqNum=-1] 2024-12-05T12:31:14,917 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:31:14,918 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41766, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:31:14,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2556e7014d8b,46829,1733401873771 2024-12-05T12:31:14,921 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T12:31:14,922 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 2556e7014d8b,46829,1733401873771 2024-12-05T12:31:14,922 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@58f5cb72 2024-12-05T12:31:14,922 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T12:31:14,924 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60444, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T12:31:14,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:31:14,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-05T12:31:14,928 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:31:14,929 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:14,929 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-05T12:31:14,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:31:14,930 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:31:14,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741837_1013 (size=392) 
2024-12-05T12:31:14,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741837_1013 (size=392) 2024-12-05T12:31:14,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741837_1013 (size=392) 2024-12-05T12:31:14,950 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => dffcee3b8076f57f3123f051c98c888f, NAME => 'TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6 2024-12-05T12:31:14,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741838_1014 (size=51) 2024-12-05T12:31:14,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741838_1014 (size=51) 2024-12-05T12:31:14,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741838_1014 (size=51) 2024-12-05T12:31:14,963 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:14,964 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing dffcee3b8076f57f3123f051c98c888f, disabling compactions & flushes 2024-12-05T12:31:14,964 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. 2024-12-05T12:31:14,964 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. 2024-12-05T12:31:14,964 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. after waiting 0 ms 2024-12-05T12:31:14,964 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. 2024-12-05T12:31:14,964 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. 
2024-12-05T12:31:14,964 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for dffcee3b8076f57f3123f051c98c888f: Waiting for close lock at 1733401874964Disabling compacts and flushes for region at 1733401874964Disabling writes for close at 1733401874964Writing region close event to WAL at 1733401874964Closed at 1733401874964 2024-12-05T12:31:14,966 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:31:14,966 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733401874966"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733401874966"}]},"ts":"1733401874966"} 2024-12-05T12:31:14,971 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-05T12:31:14,973 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:31:14,973 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733401874973"}]},"ts":"1733401874973"} 2024-12-05T12:31:14,976 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-05T12:31:14,976 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {2556e7014d8b=0} racks are {/default-rack=0} 2024-12-05T12:31:14,977 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:31:14,977 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:31:14,977 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:31:14,977 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:31:14,977 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:31:14,977 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:31:14,977 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:31:14,977 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:31:14,977 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:31:14,977 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:31:14,977 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=dffcee3b8076f57f3123f051c98c888f, ASSIGN}] 2024-12-05T12:31:14,979 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=dffcee3b8076f57f3123f051c98c888f, ASSIGN 2024-12-05T12:31:14,981 INFO [PEWorker-3 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=dffcee3b8076f57f3123f051c98c888f, ASSIGN; state=OFFLINE, location=2556e7014d8b,45739,1733401873843; forceNewPlan=false, retain=false 2024-12-05T12:31:15,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:31:15,132 INFO [2556e7014d8b:46829 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T12:31:15,132 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dffcee3b8076f57f3123f051c98c888f, regionState=OPENING, regionLocation=2556e7014d8b,45739,1733401873843 2024-12-05T12:31:15,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=dffcee3b8076f57f3123f051c98c888f, ASSIGN because future has completed 2024-12-05T12:31:15,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dffcee3b8076f57f3123f051c98c888f, server=2556e7014d8b,45739,1733401873843}] 2024-12-05T12:31:15,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:31:15,294 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T12:31:15,296 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53973, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:31:15,302 INFO [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. 
2024-12-05T12:31:15,302 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => dffcee3b8076f57f3123f051c98c888f, NAME => 'TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:31:15,302 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,302 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:31:15,302 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,302 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,304 INFO [StoreOpener-dffcee3b8076f57f3123f051c98c888f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,306 INFO [StoreOpener-dffcee3b8076f57f3123f051c98c888f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dffcee3b8076f57f3123f051c98c888f columnFamilyName cf 2024-12-05T12:31:15,306 DEBUG [StoreOpener-dffcee3b8076f57f3123f051c98c888f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:31:15,306 INFO [StoreOpener-dffcee3b8076f57f3123f051c98c888f-1 {}] regionserver.HStore(327): Store=dffcee3b8076f57f3123f051c98c888f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:31:15,307 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,307 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/default/TestHBaseWalOnEC/dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,308 DEBUG 
[RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/default/TestHBaseWalOnEC/dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,308 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,308 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,310 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,313 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/default/TestHBaseWalOnEC/dffcee3b8076f57f3123f051c98c888f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:31:15,314 INFO [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened dffcee3b8076f57f3123f051c98c888f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66220419, jitterRate=-0.013238862156867981}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:31:15,314 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,315 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for dffcee3b8076f57f3123f051c98c888f: Running coprocessor pre-open hook at 1733401875303Writing region info on filesystem at 1733401875303Initializing all the Stores at 1733401875304 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733401875304Cleaning up temporary data from old regions at 1733401875308 (+4 ms)Running coprocessor post-open hooks at 1733401875314 (+6 ms)Region opened successfully at 1733401875315 (+1 ms) 2024-12-05T12:31:15,317 INFO [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f., pid=6, masterSystemTime=1733401875294 2024-12-05T12:31:15,320 DEBUG [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. 2024-12-05T12:31:15,320 INFO [RS_OPEN_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. 
2024-12-05T12:31:15,321 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dffcee3b8076f57f3123f051c98c888f, regionState=OPEN, openSeqNum=2, regionLocation=2556e7014d8b,45739,1733401873843 2024-12-05T12:31:15,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dffcee3b8076f57f3123f051c98c888f, server=2556e7014d8b,45739,1733401873843 because future has completed 2024-12-05T12:31:15,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T12:31:15,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure dffcee3b8076f57f3123f051c98c888f, server=2556e7014d8b,45739,1733401873843 in 187 msec 2024-12-05T12:31:15,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T12:31:15,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=dffcee3b8076f57f3123f051c98c888f, ASSIGN in 355 msec 2024-12-05T12:31:15,338 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:31:15,338 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733401875338"}]},"ts":"1733401875338"} 2024-12-05T12:31:15,342 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-05T12:31:15,343 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:31:15,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 418 msec 2024-12-05T12:31:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:31:15,559 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T12:31:15,559 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-05T12:31:15,559 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:31:15,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-05T12:31:15,563 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:31:15,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
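The pid=4 CreateTableProcedure and the "Waiting until all regions of table TestHBaseWalOnEC get assigned" records above are what a test-side create call of roughly the following shape would produce. This is a hedged sketch of the assumed client code, not the actual TestHBaseWalOnEC source; the helper name CreateTestTable and the util/conn parameters are illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    final class CreateTestTable {
      // Create 'TestHBaseWalOnEC' with a single 'cf' family, then block until its region is assigned.
      static void createAndWait(HBaseTestingUtil util, Connection conn) throws Exception {
        TableName tableName = TableName.valueOf("TestHBaseWalOnEC");
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());                              // drives the CreateTableProcedure logged as pid=4
        }
        util.waitUntilAllRegionsAssigned(tableName);  // mirrors "Waiting until all regions ... get assigned"
      }
    }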
2024-12-05T12:31:15,566 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f., hostname=2556e7014d8b,45739,1733401873843, seqNum=2] 2024-12-05T12:31:15,567 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:31:15,570 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40036, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:31:15,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-05T12:31:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-05T12:31:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:31:15,578 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T12:31:15,579 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-05T12:31:15,581 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T12:31:15,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T12:31:15,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T12:31:15,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T12:31:15,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T12:31:15,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:31:15,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45739 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-05T12:31:15,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. 
2024-12-05T12:31:15,738 INFO [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing dffcee3b8076f57f3123f051c98c888f 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-05T12:31:15,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/default/TestHBaseWalOnEC/dffcee3b8076f57f3123f051c98c888f/.tmp/cf/0f0e99d6e2674f6daea747adb36c1ae7 is 36, key is row/cf:cq/1733401875571/Put/seqid=0 2024-12-05T12:31:15,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741839_1015 (size=4787) 2024-12-05T12:31:15,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741839_1015 (size=4787) 2024-12-05T12:31:15,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741839_1015 (size=4787) 2024-12-05T12:31:15,790 INFO [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/default/TestHBaseWalOnEC/dffcee3b8076f57f3123f051c98c888f/.tmp/cf/0f0e99d6e2674f6daea747adb36c1ae7 2024-12-05T12:31:15,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/default/TestHBaseWalOnEC/dffcee3b8076f57f3123f051c98c888f/.tmp/cf/0f0e99d6e2674f6daea747adb36c1ae7 as hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/default/TestHBaseWalOnEC/dffcee3b8076f57f3123f051c98c888f/cf/0f0e99d6e2674f6daea747adb36c1ae7 2024-12-05T12:31:15,814 INFO [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/default/TestHBaseWalOnEC/dffcee3b8076f57f3123f051c98c888f/cf/0f0e99d6e2674f6daea747adb36c1ae7, entries=1, sequenceid=5, filesize=4.7 K 2024-12-05T12:31:15,816 INFO [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for dffcee3b8076f57f3123f051c98c888f in 78ms, sequenceid=5, compaction requested=false 2024-12-05T12:31:15,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for dffcee3b8076f57f3123f051c98c888f: 2024-12-05T12:31:15,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. 
2024-12-05T12:31:15,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2556e7014d8b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-05T12:31:15,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-05T12:31:15,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-05T12:31:15,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 238 msec 2024-12-05T12:31:15,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 254 msec 2024-12-05T12:31:15,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:31:15,898 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T12:31:15,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T12:31:15,903 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T12:31:15,904 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
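The flushed HFile key "row/cf:cq" and the FLUSH operation recorded above correspond to a single put followed by an admin-triggered flush. A minimal sketch under the assumption of the standard HBase client API; the cell value ("value") and the class name PutAndFlush are hypothetical, since the log only records the 32 B data size:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PutAndFlush {
      // Write the single cell row/cf:cq seen in the HFile key above, then flush the table.
      static void putAndFlush(Connection conn) throws Exception {
        TableName tableName = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tableName)) {
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
        }
        try (Admin admin = conn.getAdmin()) {
          admin.flush(tableName);   // drives FlushTableProcedure (pid=7) and FlushRegionProcedure (pid=8)
        }
      }
    }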
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:31:15,904 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:15,904 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:15,904 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T12:31:15,904 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=35211682, stopped=false 2024-12-05T12:31:15,904 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T12:31:15,904 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2556e7014d8b,46829,1733401873771 2024-12-05T12:31:15,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:15,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:15,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:15,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:15,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:31:15,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:15,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:15,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:31:15,907 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T12:31:15,907 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T12:31:15,908 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:31:15,908 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:31:15,908 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:15,908 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:31:15,908 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2556e7014d8b,45367,1733401873815' ***** 2024-12-05T12:31:15,908 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:31:15,908 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T12:31:15,908 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2556e7014d8b,45739,1733401873843' ***** 2024-12-05T12:31:15,908 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T12:31:15,908 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:31:15,909 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T12:31:15,909 INFO [RS:0;2556e7014d8b:45367 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T12:31:15,909 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T12:31:15,909 INFO [RS:0;2556e7014d8b:45367 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T12:31:15,909 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(959): stopping server 2556e7014d8b,45367,1733401873815 2024-12-05T12:31:15,909 INFO [RS:0;2556e7014d8b:45367 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T12:31:15,909 INFO [RS:0;2556e7014d8b:45367 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2556e7014d8b:45367. 
2024-12-05T12:31:15,909 DEBUG [RS:0;2556e7014d8b:45367 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:31:15,909 DEBUG [RS:0;2556e7014d8b:45367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:15,909 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(976): stopping server 2556e7014d8b,45367,1733401873815; all regions closed. 2024-12-05T12:31:15,911 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T12:31:15,911 INFO [RS:1;2556e7014d8b:45739 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T12:31:15,911 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T12:31:15,911 INFO [RS:1;2556e7014d8b:45739 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T12:31:15,911 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(3091): Received CLOSE for dffcee3b8076f57f3123f051c98c888f 2024-12-05T12:31:15,911 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2556e7014d8b,46755,1733401873880' ***** 2024-12-05T12:31:15,911 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T12:31:15,911 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T12:31:15,911 INFO [RS:2;2556e7014d8b:46755 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T12:31:15,911 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T12:31:15,911 INFO [RS:2;2556e7014d8b:46755 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-05T12:31:15,912 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(959): stopping server 2556e7014d8b,46755,1733401873880 2024-12-05T12:31:15,912 INFO [RS:2;2556e7014d8b:46755 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T12:31:15,912 INFO [RS:2;2556e7014d8b:46755 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2556e7014d8b:46755. 2024-12-05T12:31:15,912 DEBUG [RS:2;2556e7014d8b:46755 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:31:15,912 DEBUG [RS:2;2556e7014d8b:46755 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:31:15,912 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T12:31:15,912 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T12:31:15,912 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T12:31:15,912 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T12:31:15,922 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T12:31:15,923 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T12:31:15,923 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T12:31:15,923 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T12:31:15,923 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T12:31:15,932 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(959): stopping server 2556e7014d8b,45739,1733401873843 2024-12-05T12:31:15,932 INFO [RS:1;2556e7014d8b:45739 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T12:31:15,933 INFO [RS:1;2556e7014d8b:45739 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2556e7014d8b:45739. 
2024-12-05T12:31:15,933 DEBUG [RS:1;2556e7014d8b:45739 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-05T12:31:15,933 DEBUG [RS:1;2556e7014d8b:45739 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:15,933 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-05T12:31:15,933 DEBUG [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(1325): Online Regions={dffcee3b8076f57f3123f051c98c888f=TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.}
2024-12-05T12:31:15,933 DEBUG [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(1351): Waiting on dffcee3b8076f57f3123f051c98c888f
2024-12-05T12:31:15,936 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-05T12:31:15,936 DEBUG [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-12-05T12:31:15,936 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing dffcee3b8076f57f3123f051c98c888f, disabling compactions & flushes
2024-12-05T12:31:15,936 DEBUG [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-05T12:31:15,936 INFO [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.
2024-12-05T12:31:15,936 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-05T12:31:15,936 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-05T12:31:15,936 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.
2024-12-05T12:31:15,937 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-05T12:31:15,937 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-05T12:31:15,937 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f. after waiting 0 ms
2024-12-05T12:31:15,937 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-05T12:31:15,937 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.
2024-12-05T12:31:15,937 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB
2024-12-05T12:31:15,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741835_1011 (size=93)
2024-12-05T12:31:15,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741835_1011 (size=93)
2024-12-05T12:31:15,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741835_1011 (size=93)
2024-12-05T12:31:15,953 DEBUG [RS:0;2556e7014d8b:45367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/oldWALs
2024-12-05T12:31:15,953 INFO [RS:0;2556e7014d8b:45367 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2556e7014d8b%2C45367%2C1733401873815:(num 1733401874423)
2024-12-05T12:31:15,953 DEBUG [RS:0;2556e7014d8b:45367 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:15,953 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.LeaseManager(133): Closed leases
2024-12-05T12:31:15,954 INFO [RS:0;2556e7014d8b:45367 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-05T12:31:15,954 INFO [RS:0;2556e7014d8b:45367 {}] hbase.ChoreService(370): Chore service for: regionserver/2556e7014d8b:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-05T12:31:15,954 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-05T12:31:15,954 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-05T12:31:15,954 INFO [regionserver/2556e7014d8b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T12:31:15,954 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-05T12:31:15,954 INFO [RS:0;2556e7014d8b:45367 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-05T12:31:15,954 INFO [RS:0;2556e7014d8b:45367 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45367
2024-12-05T12:31:15,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-05T12:31:15,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2556e7014d8b,45367,1733401873815
2024-12-05T12:31:15,956 INFO [RS:0;2556e7014d8b:45367 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-05T12:31:15,957 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2556e7014d8b,45367,1733401873815]
2024-12-05T12:31:15,959 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2556e7014d8b,45367,1733401873815 already deleted, retry=false
2024-12-05T12:31:15,959 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2556e7014d8b,45367,1733401873815 expired; onlineServers=2
2024-12-05T12:31:15,968 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/.tmp/info/409e244e48074d8f9d7f3eaafa4b00e2 is 153, key is TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f./info:regioninfo/1733401875321/Put/seqid=0
2024-12-05T12:31:15,969 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/default/TestHBaseWalOnEC/dffcee3b8076f57f3123f051c98c888f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-05T12:31:15,971 INFO [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.
2024-12-05T12:31:15,971 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for dffcee3b8076f57f3123f051c98c888f: Waiting for close lock at 1733401875933Running coprocessor pre-close hooks at 1733401875933Disabling compacts and flushes for region at 1733401875936 (+3 ms)Disabling writes for close at 1733401875937 (+1 ms)Writing region close event to WAL at 1733401875962 (+25 ms)Running coprocessor post-close hooks at 1733401875971 (+9 ms)Closed at 1733401875971
2024-12-05T12:31:15,971 DEBUG [RS_CLOSE_REGION-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733401874924.dffcee3b8076f57f3123f051c98c888f.
2024-12-05T12:31:15,986 INFO [regionserver/2556e7014d8b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-05T12:31:15,986 INFO [regionserver/2556e7014d8b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-05T12:31:15,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741840_1016 (size=6637)
2024-12-05T12:31:15,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741840_1016 (size=6637)
2024-12-05T12:31:15,990 INFO [regionserver/2556e7014d8b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-05T12:31:15,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741840_1016 (size=6637)
2024-12-05T12:31:15,991 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/.tmp/info/409e244e48074d8f9d7f3eaafa4b00e2
2024-12-05T12:31:16,031 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/.tmp/ns/cbb088f815e74b42a62bd0b990a22b21 is 43, key is default/ns:d/1733401874826/Put/seqid=0
2024-12-05T12:31:16,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741841_1017 (size=5153)
2024-12-05T12:31:16,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:31:16,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x100aa6b97ef0001, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:31:16,059 INFO [RS:0;2556e7014d8b:45367 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-05T12:31:16,059 INFO [RS:0;2556e7014d8b:45367 {}] regionserver.HRegionServer(1031): Exiting; stopping=2556e7014d8b,45367,1733401873815; zookeeper connection closed.
2024-12-05T12:31:16,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741841_1017 (size=5153)
2024-12-05T12:31:16,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741841_1017 (size=5153)
2024-12-05T12:31:16,061 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/.tmp/ns/cbb088f815e74b42a62bd0b990a22b21
2024-12-05T12:31:16,065 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@9c285e1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@9c285e1
2024-12-05T12:31:16,100 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/.tmp/table/b56ac25866254a5db4c5d10006979588 is 52, key is TestHBaseWalOnEC/table:state/1733401875338/Put/seqid=0
2024-12-05T12:31:16,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741842_1018 (size=5249)
2024-12-05T12:31:16,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741842_1018 (size=5249)
2024-12-05T12:31:16,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741842_1018 (size=5249)
2024-12-05T12:31:16,117 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/.tmp/table/b56ac25866254a5db4c5d10006979588
2024-12-05T12:31:16,126 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/.tmp/info/409e244e48074d8f9d7f3eaafa4b00e2 as hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/info/409e244e48074d8f9d7f3eaafa4b00e2
2024-12-05T12:31:16,133 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(976): stopping server 2556e7014d8b,45739,1733401873843; all regions closed.
2024-12-05T12:31:16,136 DEBUG [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-05T12:31:16,136 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/info/409e244e48074d8f9d7f3eaafa4b00e2, entries=10, sequenceid=11, filesize=6.5 K
2024-12-05T12:31:16,138 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/.tmp/ns/cbb088f815e74b42a62bd0b990a22b21 as hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/ns/cbb088f815e74b42a62bd0b990a22b21
2024-12-05T12:31:16,141 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,141 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,141 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,141 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741834_1010 (size=1298)
2024-12-05T12:31:16,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741834_1010 (size=1298)
2024-12-05T12:31:16,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741834_1010 (size=1298)
2024-12-05T12:31:16,151 DEBUG [RS:1;2556e7014d8b:45739 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/oldWALs
2024-12-05T12:31:16,151 INFO [RS:1;2556e7014d8b:45739 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2556e7014d8b%2C45739%2C1733401873843:(num 1733401874423)
2024-12-05T12:31:16,151 DEBUG [RS:1;2556e7014d8b:45739 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:16,151 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.LeaseManager(133): Closed leases
2024-12-05T12:31:16,151 INFO [RS:1;2556e7014d8b:45739 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-05T12:31:16,151 INFO [RS:1;2556e7014d8b:45739 {}] hbase.ChoreService(370): Chore service for: regionserver/2556e7014d8b:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-05T12:31:16,152 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-05T12:31:16,152 INFO [regionserver/2556e7014d8b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T12:31:16,152 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-05T12:31:16,152 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-05T12:31:16,152 INFO [RS:1;2556e7014d8b:45739 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-05T12:31:16,152 INFO [RS:1;2556e7014d8b:45739 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45739
2024-12-05T12:31:16,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2556e7014d8b,45739,1733401873843
2024-12-05T12:31:16,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-05T12:31:16,155 INFO [RS:1;2556e7014d8b:45739 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-05T12:31:16,156 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2556e7014d8b,45739,1733401873843]
2024-12-05T12:31:16,156 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/ns/cbb088f815e74b42a62bd0b990a22b21, entries=2, sequenceid=11, filesize=5.0 K
2024-12-05T12:31:16,157 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/.tmp/table/b56ac25866254a5db4c5d10006979588 as hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/table/b56ac25866254a5db4c5d10006979588
2024-12-05T12:31:16,158 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2556e7014d8b,45739,1733401873843 already deleted, retry=false
2024-12-05T12:31:16,158 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2556e7014d8b,45739,1733401873843 expired; onlineServers=1
2024-12-05T12:31:16,167 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/table/b56ac25866254a5db4c5d10006979588, entries=2, sequenceid=11, filesize=5.1 K
2024-12-05T12:31:16,169 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 231ms, sequenceid=11, compaction requested=false
2024-12-05T12:31:16,177 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-05T12:31:16,178 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-05T12:31:16,178 INFO [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-05T12:31:16,179 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733401875936Running coprocessor pre-close hooks at 1733401875936Disabling compacts and flushes for region at 1733401875936Disabling writes for close at 1733401875937 (+1 ms)Obtaining lock to block concurrent updates at 1733401875937Preparing flush snapshotting stores in 1588230740 at 1733401875937Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733401875938 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733401875939 (+1 ms)Flushing 1588230740/info: creating writer at 1733401875939Flushing 1588230740/info: appending metadata at 1733401875968 (+29 ms)Flushing 1588230740/info: closing flushed file at 1733401875968Flushing 1588230740/ns: creating writer at 1733401876004 (+36 ms)Flushing 1588230740/ns: appending metadata at 1733401876030 (+26 ms)Flushing 1588230740/ns: closing flushed file at 1733401876030Flushing 1588230740/table: creating writer at 1733401876069 (+39 ms)Flushing 1588230740/table: appending metadata at 1733401876100 (+31 ms)Flushing 1588230740/table: closing flushed file at 1733401876100Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60934e44: reopening flushed file at 1733401876125 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b24d454: reopening flushed file at 1733401876137 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a121af5: reopening flushed file at 1733401876156 (+19 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 231ms, sequenceid=11, compaction requested=false at 1733401876169 (+13 ms)Writing region close event to WAL at 1733401876172 (+3 ms)Running coprocessor post-close hooks at 1733401876178 (+6 ms)Closed at 1733401876178
2024-12-05T12:31:16,179 DEBUG [RS_CLOSE_META-regionserver/2556e7014d8b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-05T12:31:16,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:31:16,257 INFO [RS:1;2556e7014d8b:45739 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-05T12:31:16,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45739-0x100aa6b97ef0002, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:31:16,257 INFO [RS:1;2556e7014d8b:45739 {}] regionserver.HRegionServer(1031): Exiting; stopping=2556e7014d8b,45739,1733401873843; zookeeper connection closed.
2024-12-05T12:31:16,258 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1b80f89e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1b80f89e
2024-12-05T12:31:16,337 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(976): stopping server 2556e7014d8b,46755,1733401873880; all regions closed.
2024-12-05T12:31:16,337 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,337 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,338 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,338 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,338 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741836_1012 (size=2751)
2024-12-05T12:31:16,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741836_1012 (size=2751)
2024-12-05T12:31:16,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741836_1012 (size=2751)
2024-12-05T12:31:16,345 DEBUG [RS:2;2556e7014d8b:46755 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/oldWALs
2024-12-05T12:31:16,345 INFO [RS:2;2556e7014d8b:46755 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2556e7014d8b%2C46755%2C1733401873880.meta:.meta(num 1733401874764)
2024-12-05T12:31:16,346 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,346 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,346 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,346 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,346 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741833_1009 (size=93)
2024-12-05T12:31:16,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741833_1009 (size=93)
2024-12-05T12:31:16,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741833_1009 (size=93)
2024-12-05T12:31:16,352 DEBUG [RS:2;2556e7014d8b:46755 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/oldWALs
2024-12-05T12:31:16,352 INFO [RS:2;2556e7014d8b:46755 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2556e7014d8b%2C46755%2C1733401873880:(num 1733401874423)
2024-12-05T12:31:16,352 DEBUG [RS:2;2556e7014d8b:46755 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T12:31:16,352 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.LeaseManager(133): Closed leases
2024-12-05T12:31:16,352 INFO [RS:2;2556e7014d8b:46755 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-05T12:31:16,352 INFO [RS:2;2556e7014d8b:46755 {}] hbase.ChoreService(370): Chore service for: regionserver/2556e7014d8b:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-05T12:31:16,352 INFO [RS:2;2556e7014d8b:46755 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-05T12:31:16,353 INFO [regionserver/2556e7014d8b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T12:31:16,353 INFO [RS:2;2556e7014d8b:46755 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46755
2024-12-05T12:31:16,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2556e7014d8b,46755,1733401873880
2024-12-05T12:31:16,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-05T12:31:16,355 INFO [RS:2;2556e7014d8b:46755 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-05T12:31:16,356 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2556e7014d8b,46755,1733401873880]
2024-12-05T12:31:16,359 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2556e7014d8b,46755,1733401873880 already deleted, retry=false
2024-12-05T12:31:16,359 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2556e7014d8b,46755,1733401873880 expired; onlineServers=0
2024-12-05T12:31:16,359 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2556e7014d8b,46829,1733401873771' *****
2024-12-05T12:31:16,359 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-05T12:31:16,359 INFO [M:0;2556e7014d8b:46829 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-05T12:31:16,359 INFO [M:0;2556e7014d8b:46829 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-05T12:31:16,359 DEBUG [M:0;2556e7014d8b:46829 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-05T12:31:16,359 DEBUG [M:0;2556e7014d8b:46829 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-05T12:31:16,359 DEBUG [master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.large.0-1733401874125 {}] cleaner.HFileCleaner(306): Exit Thread[master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.large.0-1733401874125,5,FailOnTimeoutGroup]
2024-12-05T12:31:16,359 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-05T12:31:16,359 DEBUG [master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.small.0-1733401874125 {}] cleaner.HFileCleaner(306): Exit Thread[master/2556e7014d8b:0:becomeActiveMaster-HFileCleaner.small.0-1733401874125,5,FailOnTimeoutGroup]
2024-12-05T12:31:16,359 INFO [M:0;2556e7014d8b:46829 {}] hbase.ChoreService(370): Chore service for: master/2556e7014d8b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-05T12:31:16,359 INFO [M:0;2556e7014d8b:46829 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-05T12:31:16,359 DEBUG [M:0;2556e7014d8b:46829 {}] master.HMaster(1795): Stopping service threads
2024-12-05T12:31:16,360 INFO [M:0;2556e7014d8b:46829 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-05T12:31:16,360 INFO [M:0;2556e7014d8b:46829 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-05T12:31:16,360 INFO [M:0;2556e7014d8b:46829 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-05T12:31:16,360 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-05T12:31:16,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-05T12:31:16,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T12:31:16,361 DEBUG [M:0;2556e7014d8b:46829 {}] zookeeper.ZKUtil(347): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-05T12:31:16,361 WARN [M:0;2556e7014d8b:46829 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-05T12:31:16,362 INFO [M:0;2556e7014d8b:46829 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/.lastflushedseqids
2024-12-05T12:31:16,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741843_1019 (size=127)
2024-12-05T12:31:16,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741843_1019 (size=127)
2024-12-05T12:31:16,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741843_1019 (size=127)
2024-12-05T12:31:16,373 INFO [M:0;2556e7014d8b:46829 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-05T12:31:16,373 INFO [M:0;2556e7014d8b:46829 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-05T12:31:16,373 DEBUG [M:0;2556e7014d8b:46829 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-05T12:31:16,374 INFO [M:0;2556e7014d8b:46829 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-05T12:31:16,374 DEBUG [M:0;2556e7014d8b:46829 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-05T12:31:16,374 DEBUG [M:0;2556e7014d8b:46829 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-05T12:31:16,374 DEBUG [M:0;2556e7014d8b:46829 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-05T12:31:16,374 INFO [M:0;2556e7014d8b:46829 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB
2024-12-05T12:31:16,399 DEBUG [M:0;2556e7014d8b:46829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0bf8e4ba552d418fb0cbb15c394cd799 is 82, key is hbase:meta,,1/info:regioninfo/1733401874805/Put/seqid=0
2024-12-05T12:31:16,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741844_1020 (size=5672)
2024-12-05T12:31:16,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741844_1020 (size=5672)
2024-12-05T12:31:16,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741844_1020 (size=5672)
2024-12-05T12:31:16,409 INFO [M:0;2556e7014d8b:46829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0bf8e4ba552d418fb0cbb15c394cd799
2024-12-05T12:31:16,432 DEBUG [M:0;2556e7014d8b:46829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3e1d1d98e8874a13a5888f00025e08e4 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733401875345/Put/seqid=0
2024-12-05T12:31:16,434 WARN [IPC Server handler 0 on default port 42703 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-05T12:31:16,434 WARN [IPC Server handler 0 on default port 42703 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-05T12:31:16,434 WARN [IPC Server handler 0 on default port 42703 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-05T12:31:16,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741845_1021 (size=6439)
2024-12-05T12:31:16,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741845_1021 (size=6439)
2024-12-05T12:31:16,439 INFO [M:0;2556e7014d8b:46829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3e1d1d98e8874a13a5888f00025e08e4
2024-12-05T12:31:16,458 INFO [RS:2;2556e7014d8b:46755 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-05T12:31:16,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:31:16,458 INFO [RS:2;2556e7014d8b:46755 {}] regionserver.HRegionServer(1031): Exiting; stopping=2556e7014d8b,46755,1733401873880; zookeeper connection closed.
2024-12-05T12:31:16,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46755-0x100aa6b97ef0003, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:31:16,459 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@43fc2c33 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@43fc2c33
2024-12-05T12:31:16,459 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-12-05T12:31:16,463 DEBUG [M:0;2556e7014d8b:46829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00a53a13030645be80253b75e5656e04 is 69, key is 2556e7014d8b,45367,1733401873815/rs:state/1733401874241/Put/seqid=0
2024-12-05T12:31:16,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741846_1022 (size=5294)
2024-12-05T12:31:16,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741846_1022 (size=5294)
2024-12-05T12:31:16,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741846_1022 (size=5294)
2024-12-05T12:31:16,480 INFO [M:0;2556e7014d8b:46829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00a53a13030645be80253b75e5656e04
2024-12-05T12:31:16,489 DEBUG [M:0;2556e7014d8b:46829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0bf8e4ba552d418fb0cbb15c394cd799 as hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0bf8e4ba552d418fb0cbb15c394cd799
2024-12-05T12:31:16,503 INFO [M:0;2556e7014d8b:46829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0bf8e4ba552d418fb0cbb15c394cd799, entries=8, sequenceid=72, filesize=5.5 K
2024-12-05T12:31:16,504 DEBUG [M:0;2556e7014d8b:46829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3e1d1d98e8874a13a5888f00025e08e4 as hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3e1d1d98e8874a13a5888f00025e08e4
2024-12-05T12:31:16,510 INFO [M:0;2556e7014d8b:46829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3e1d1d98e8874a13a5888f00025e08e4, entries=8, sequenceid=72, filesize=6.3 K
2024-12-05T12:31:16,511 DEBUG [M:0;2556e7014d8b:46829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00a53a13030645be80253b75e5656e04 as hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00a53a13030645be80253b75e5656e04
2024-12-05T12:31:16,518 INFO [M:0;2556e7014d8b:46829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42703/user/jenkins/test-data/5a33fa3d-985f-5758-9a35-b4ff13d2bcf6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00a53a13030645be80253b75e5656e04, entries=3, sequenceid=72, filesize=5.2 K
2024-12-05T12:31:16,520 INFO [M:0;2556e7014d8b:46829 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=72, compaction requested=false
2024-12-05T12:31:16,521 INFO [M:0;2556e7014d8b:46829 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-05T12:31:16,521 DEBUG [M:0;2556e7014d8b:46829 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733401876373Disabling compacts and flushes for region at 1733401876373Disabling writes for close at 1733401876374 (+1 ms)Obtaining lock to block concurrent updates at 1733401876374Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733401876374Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733401876374Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733401876375 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733401876375Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733401876399 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733401876399Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733401876416 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733401876432 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733401876432Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733401876445 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733401876463 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733401876463Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35e0a663: reopening flushed file at 1733401876487 (+24 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f42897c: reopening flushed file at 1733401876503 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c08cf8c: reopening flushed file at 1733401876510 (+7 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=72, compaction requested=false at 1733401876520 (+10 ms)Writing region close event to WAL at 1733401876521 (+1 ms)Closed at 1733401876521
2024-12-05T12:31:16,522 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,522 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,522 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,522 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,522 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T12:31:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35315 is added to blk_1073741830_1006 (size=32674)
2024-12-05T12:31:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33583 is added to blk_1073741830_1006 (size=32674)
2024-12-05T12:31:16,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36987 is added to blk_1073741830_1006 (size=32674)
2024-12-05T12:31:16,527 INFO [M:0;2556e7014d8b:46829 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-05T12:31:16,527 INFO [M:0;2556e7014d8b:46829 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46829
2024-12-05T12:31:16,528 INFO [M:0;2556e7014d8b:46829 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-05T12:31:16,528 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T12:31:16,631 INFO [M:0;2556e7014d8b:46829 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-05T12:31:16,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:31:16,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46829-0x100aa6b97ef0000, quorum=127.0.0.1:53472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:31:16,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ae2d238{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:31:16,635 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@700f39d7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T12:31:16,635 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T12:31:16,636 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44968fad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T12:31:16,636 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@660b8bbc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.log.dir/,STOPPED}
2024-12-05T12:31:16,638 WARN [BP-242852367-172.17.0.2-1733401872864 heartbeating to localhost/127.0.0.1:42703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T12:31:16,638 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T12:31:16,638 WARN [BP-242852367-172.17.0.2-1733401872864 heartbeating to localhost/127.0.0.1:42703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-242852367-172.17.0.2-1733401872864 (Datanode Uuid a8066a58-2c59-4809-8a1a-e701dfebdb58) service to localhost/127.0.0.1:42703
2024-12-05T12:31:16,638 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T12:31:16,639 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data5/current/BP-242852367-172.17.0.2-1733401872864 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:31:16,640 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data6/current/BP-242852367-172.17.0.2-1733401872864 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:31:16,640 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T12:31:16,646 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@13173478{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:31:16,646 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45fbb531{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T12:31:16,646 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T12:31:16,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b4148d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T12:31:16,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@505d9ca3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.log.dir/,STOPPED}
2024-12-05T12:31:16,649 WARN [BP-242852367-172.17.0.2-1733401872864 heartbeating to localhost/127.0.0.1:42703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T12:31:16,649 WARN [BP-242852367-172.17.0.2-1733401872864 heartbeating to localhost/127.0.0.1:42703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-242852367-172.17.0.2-1733401872864 (Datanode Uuid 8fc6ae7d-54f0-4a3f-b91c-2c549f9a008d) service to localhost/127.0.0.1:42703
2024-12-05T12:31:16,650 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data3/current/BP-242852367-172.17.0.2-1733401872864 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:31:16,650 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data4/current/BP-242852367-172.17.0.2-1733401872864 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:31:16,650 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T12:31:16,650 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T12:31:16,651 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T12:31:16,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@56aa9d3b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:31:16,653 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f6e4afc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T12:31:16,653 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T12:31:16,654 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c4ebd49{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T12:31:16,654 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c7a8992{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.log.dir/,STOPPED}
2024-12-05T12:31:16,659 WARN [BP-242852367-172.17.0.2-1733401872864 heartbeating to localhost/127.0.0.1:42703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T12:31:16,659 WARN [BP-242852367-172.17.0.2-1733401872864 heartbeating to localhost/127.0.0.1:42703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-242852367-172.17.0.2-1733401872864 (Datanode Uuid 7f7d1356-0304-40ec-b960-181a1ed8d933) service to localhost/127.0.0.1:42703
2024-12-05T12:31:16,661 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data2/current/BP-242852367-172.17.0.2-1733401872864 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:31:16,661 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/cluster_4a70582a-4559-8f56-ebac-11181919276e/data/data1/current/BP-242852367-172.17.0.2-1733401872864 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:31:16,661 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T12:31:16,662 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T12:31:16,663 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T12:31:16,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4453029d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T12:31:16,671 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ad015a3{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T12:31:16,671 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T12:31:16,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28bcd4bd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T12:31:16,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41ab5cc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/03f6d92c-1a17-0c99-7ba6-c907e6fa0d91/hadoop.log.dir/,STOPPED}
2024-12-05T12:31:16,687 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-05T12:31:16,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-05T12:31:16,722 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=144 (was 83) - Thread LEAK? -, OpenFileDescriptor=518 (was 437) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=390 (was 363) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2763 (was 3038)