2024-12-11 23:05:49,880 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-11 23:05:49,892 main DEBUG Took 0.009385 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-11 23:05:49,892 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-11 23:05:49,892 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-11 23:05:49,893 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-11 23:05:49,894 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,903 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-11 23:05:49,922 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,923 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,924 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,925 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,925 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,926 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,927 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,927 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,928 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,928 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,929 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,929 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,930 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,930 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-11 23:05:49,931 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,931 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,932 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,932 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,932 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,933 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,933 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,934 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,934 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,934 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 23:05:49,935 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,935 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-11 23:05:49,937 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 23:05:49,938 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-11 23:05:49,941 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-11 23:05:49,941 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
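
The entries above show Log4j2 building one LoggerConfig per logger named in the test jar's log4j2.properties, each with its own level (org.apache.zookeeper at ERROR, org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, root at INFO routed to the Console appender). For reference, a minimal sketch of setting a few of those same levels programmatically with Log4j2 Core's Configurator is shown below; the logger selection is illustrative only, not the full test configuration.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class TestLogLevels {
        public static void apply() {
            // Mirror a few of the per-logger levels built above from log4j2.properties.
            Configurator.setLevel("org.apache.zookeeper", Level.ERROR);    // silence ZooKeeper client chatter
            Configurator.setLevel("org.apache.hadoop", Level.WARN);        // Hadoop at WARN
            Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG); // HBase itself at DEBUG
            Configurator.setRootLevel(Level.INFO);                         // root stays at INFO
        }
    }
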
2024-12-11 23:05:49,943 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-11 23:05:49,943 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-11 23:05:49,953 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-11 23:05:49,956 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-11 23:05:49,958 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-11 23:05:49,959 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-11 23:05:49,959 main DEBUG createAppenders(={Console}) 2024-12-11 23:05:49,960 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-11 23:05:49,960 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-11 23:05:49,961 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-11 23:05:49,961 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-11 23:05:49,962 main DEBUG OutputStream closed 2024-12-11 23:05:49,962 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-11 23:05:49,962 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-11 23:05:49,963 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-11 23:05:50,028 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-11 23:05:50,030 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-11 23:05:50,031 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-11 23:05:50,032 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-11 23:05:50,032 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-11 23:05:50,032 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-11 23:05:50,032 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-11 23:05:50,033 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-11 23:05:50,033 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-11 23:05:50,033 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-11 23:05:50,033 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-11 23:05:50,034 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-11 23:05:50,034 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-11 23:05:50,034 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-11 23:05:50,034 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-11 23:05:50,035 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-11 23:05:50,035 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-11 23:05:50,036 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-11 23:05:50,038 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11 23:05:50,038 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-11 23:05:50,038 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-11 23:05:50,039 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-11T23:05:50,052 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-11 23:05:50,054 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-11 23:05:50,054 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
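
The line "Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins" above is emitted by HBaseClassTestRule, which HBase test classes install as a JUnit @ClassRule. The sketch below is the conventional boilerplate for that rule, not code copied from TestHBaseWalOnEC itself; the 26-minute timeout is derived by the rule from the test's size category.

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.junit.ClassRule;

    public class TestHBaseWalOnEC {
        // Logs the "Test class ... timeout" line above and enforces the
        // per-class timeout derived from the test's small/medium/large category.
        @ClassRule
        public static final HBaseClassTestRule CLASS_RULE =
            HBaseClassTestRule.forClass(TestHBaseWalOnEC.class);
    }
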
2024-12-11T23:05:50,291 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d 2024-12-11T23:05:50,319 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a, deleteOnExit=true 2024-12-11T23:05:50,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/test.cache.data in system properties and HBase conf 2024-12-11T23:05:50,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T23:05:50,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir in system properties and HBase conf 2024-12-11T23:05:50,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T23:05:50,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T23:05:50,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T23:05:50,416 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-11T23:05:50,511 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T23:05:50,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T23:05:50,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T23:05:50,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T23:05:50,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T23:05:50,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T23:05:50,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T23:05:50,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T23:05:50,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T23:05:50,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T23:05:50,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/nfs.dump.dir in system properties and HBase conf 2024-12-11T23:05:50,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/java.io.tmpdir in system properties and HBase conf 2024-12-11T23:05:50,521 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T23:05:50,521 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T23:05:50,522 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T23:05:51,547 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-11T23:05:51,610 INFO [Time-limited test {}] log.Log(170): Logging initialized @2339ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-11T23:05:51,677 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:05:51,735 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T23:05:51,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T23:05:51,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T23:05:51,761 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T23:05:51,777 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:05:51,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77f7f078{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir/,AVAILABLE} 2024-12-11T23:05:51,782 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e67f021{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T23:05:51,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63603efd{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/java.io.tmpdir/jetty-localhost-40269-hadoop-hdfs-3_4_1-tests_jar-_-any-13199804539495007206/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T23:05:51,958 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f607d40{HTTP/1.1, (http/1.1)}{localhost:40269} 2024-12-11T23:05:51,958 INFO [Time-limited test {}] server.Server(415): Started @2688ms 2024-12-11T23:05:52,487 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:05:52,494 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T23:05:52,495 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T23:05:52,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T23:05:52,496 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T23:05:52,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d15633c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir/,AVAILABLE} 2024-12-11T23:05:52,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79f7513c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T23:05:52,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@103787ab{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/java.io.tmpdir/jetty-localhost-36553-hadoop-hdfs-3_4_1-tests_jar-_-any-16123149998467846874/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:05:52,597 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@23a1a692{HTTP/1.1, (http/1.1)}{localhost:36553} 2024-12-11T23:05:52,597 INFO [Time-limited test {}] server.Server(415): Started @3326ms 2024-12-11T23:05:52,640 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T23:05:52,762 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:05:52,767 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T23:05:52,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T23:05:52,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T23:05:52,768 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T23:05:52,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f04a93b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir/,AVAILABLE} 2024-12-11T23:05:52,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e752fdd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T23:05:52,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43fe7c8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/java.io.tmpdir/jetty-localhost-37869-hadoop-hdfs-3_4_1-tests_jar-_-any-7063709228876419468/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:05:52,865 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6fecef94{HTTP/1.1, (http/1.1)}{localhost:37869} 2024-12-11T23:05:52,865 INFO [Time-limited test {}] server.Server(415): Started @3595ms 2024-12-11T23:05:52,868 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T23:05:52,902 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:05:52,909 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T23:05:52,910 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T23:05:52,911 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T23:05:52,911 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T23:05:52,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f4b982a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir/,AVAILABLE} 2024-12-11T23:05:52,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36ef30b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T23:05:53,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@793c26cb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/java.io.tmpdir/jetty-localhost-38287-hadoop-hdfs-3_4_1-tests_jar-_-any-10475579249554320670/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:05:53,014 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e0ab7c3{HTTP/1.1, (http/1.1)}{localhost:38287} 2024-12-11T23:05:53,015 INFO [Time-limited test {}] server.Server(415): Started @3744ms 2024-12-11T23:05:53,016 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
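
The entries from 23:05:50 through 23:05:53 show the test utility standing up an in-process HDFS: the test-data directories are pushed into the Hadoop configuration as system properties, then a NameNode web server (localhost:40269) and three DataNode web servers (ports 36553, 37869, 38287) are started on Jetty. A rough sketch of the same thing done directly with Hadoop's MiniDFSCluster follows; HBaseTestingUtil wraps this with additional wiring, and the base directory used here is a hypothetical placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public final class MiniDfsSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Hypothetical base dir; the run above uses its generated test-data path.
            conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-base");
            MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
                .numDataNodes(3)   // three DataNodes, as in the log above
                .build();
            dfs.waitActive();
            try {
                // ... run code against dfs.getFileSystem() ...
            } finally {
                dfs.shutdown();
            }
        }
    }
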
2024-12-11T23:05:54,689 WARN [Thread-129 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data2/current/BP-109485183-172.17.0.2-1733958351034/current, will proceed with Du for space computation calculation, 2024-12-11T23:05:54,689 WARN [Thread-126 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data5/current/BP-109485183-172.17.0.2-1733958351034/current, will proceed with Du for space computation calculation, 2024-12-11T23:05:54,689 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data3/current/BP-109485183-172.17.0.2-1733958351034/current, will proceed with Du for space computation calculation, 2024-12-11T23:05:54,689 WARN [Thread-128 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data1/current/BP-109485183-172.17.0.2-1733958351034/current, will proceed with Du for space computation calculation, 2024-12-11T23:05:54,689 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data4/current/BP-109485183-172.17.0.2-1733958351034/current, will proceed with Du for space computation calculation, 2024-12-11T23:05:54,689 WARN [Thread-127 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data6/current/BP-109485183-172.17.0.2-1733958351034/current, will proceed with Du for space computation calculation, 2024-12-11T23:05:54,726 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T23:05:54,726 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T23:05:54,727 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T23:05:54,769 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6b70b55b024296f with lease ID 0x246a3ea5bde1abae: Processing first storage report for DS-ed4c26c1-4caa-4506-a402-07b471ce2f54 from datanode DatanodeRegistration(127.0.0.1:40001, datanodeUuid=47107289-7e50-4755-a3e0-4cc78a0f053d, infoPort=37119, infoSecurePort=0, ipcPort=36793, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034) 2024-12-11T23:05:54,771 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6b70b55b024296f with lease ID 0x246a3ea5bde1abae: from storage DS-ed4c26c1-4caa-4506-a402-07b471ce2f54 node DatanodeRegistration(127.0.0.1:40001, datanodeUuid=47107289-7e50-4755-a3e0-4cc78a0f053d, infoPort=37119, infoSecurePort=0, ipcPort=36793, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-11T23:05:54,771 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6e860766f2a672b with lease ID 0x246a3ea5bde1abaf: Processing first storage report for DS-777ab5c3-f813-46c8-88e9-02c5afc6ab92 from datanode DatanodeRegistration(127.0.0.1:39435, datanodeUuid=6f23d976-b72f-47d8-9209-bd40fcc68143, infoPort=34579, infoSecurePort=0, ipcPort=34343, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034) 2024-12-11T23:05:54,771 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6e860766f2a672b with lease ID 0x246a3ea5bde1abaf: from storage DS-777ab5c3-f813-46c8-88e9-02c5afc6ab92 node DatanodeRegistration(127.0.0.1:39435, datanodeUuid=6f23d976-b72f-47d8-9209-bd40fcc68143, infoPort=34579, infoSecurePort=0, ipcPort=34343, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T23:05:54,771 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a15c77872778738 with lease ID 0x246a3ea5bde1abad: Processing first storage report for DS-606f49ca-f831-42b6-91d6-cdbc3802e5cd from datanode DatanodeRegistration(127.0.0.1:39927, datanodeUuid=c9441099-1121-4f89-bff3-97b0d17d22c3, infoPort=44037, infoSecurePort=0, ipcPort=43723, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034) 2024-12-11T23:05:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a15c77872778738 with lease ID 0x246a3ea5bde1abad: from storage DS-606f49ca-f831-42b6-91d6-cdbc3802e5cd node DatanodeRegistration(127.0.0.1:39927, datanodeUuid=c9441099-1121-4f89-bff3-97b0d17d22c3, infoPort=44037, infoSecurePort=0, ipcPort=43723, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T23:05:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6b70b55b024296f with lease ID 0x246a3ea5bde1abae: Processing first storage report for DS-909d85cd-2ad9-4456-9bfa-a176b3460448 from datanode DatanodeRegistration(127.0.0.1:40001, datanodeUuid=47107289-7e50-4755-a3e0-4cc78a0f053d, infoPort=37119, infoSecurePort=0, ipcPort=36793, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034) 2024-12-11T23:05:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xa6b70b55b024296f with lease ID 0x246a3ea5bde1abae: from storage DS-909d85cd-2ad9-4456-9bfa-a176b3460448 node DatanodeRegistration(127.0.0.1:40001, datanodeUuid=47107289-7e50-4755-a3e0-4cc78a0f053d, infoPort=37119, infoSecurePort=0, ipcPort=36793, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T23:05:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6e860766f2a672b with lease ID 0x246a3ea5bde1abaf: Processing first storage report for DS-eaada089-f423-41bd-aa5c-0d2f1ea6d593 from datanode DatanodeRegistration(127.0.0.1:39435, datanodeUuid=6f23d976-b72f-47d8-9209-bd40fcc68143, infoPort=34579, infoSecurePort=0, ipcPort=34343, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034) 2024-12-11T23:05:54,773 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6e860766f2a672b with lease ID 0x246a3ea5bde1abaf: from storage DS-eaada089-f423-41bd-aa5c-0d2f1ea6d593 node DatanodeRegistration(127.0.0.1:39435, datanodeUuid=6f23d976-b72f-47d8-9209-bd40fcc68143, infoPort=34579, infoSecurePort=0, ipcPort=34343, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T23:05:54,773 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a15c77872778738 with lease ID 0x246a3ea5bde1abad: Processing first storage report for DS-2cfcbf51-318e-4d62-945e-7821d30211ee from datanode DatanodeRegistration(127.0.0.1:39927, datanodeUuid=c9441099-1121-4f89-bff3-97b0d17d22c3, infoPort=44037, infoSecurePort=0, ipcPort=43723, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034) 2024-12-11T23:05:54,773 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a15c77872778738 with lease ID 0x246a3ea5bde1abad: from storage DS-2cfcbf51-318e-4d62-945e-7821d30211ee node DatanodeRegistration(127.0.0.1:39927, datanodeUuid=c9441099-1121-4f89-bff3-97b0d17d22c3, infoPort=44037, infoSecurePort=0, ipcPort=43723, storageInfo=lv=-57;cid=testClusterID;nsid=344824318;c=1733958351034), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T23:05:54,826 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d 2024-12-11T23:05:54,886 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-11T23:05:54,933 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=162, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=350, ProcessCount=11, AvailableMemoryMB=9806 2024-12-11T23:05:54,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T23:05:54,944 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-11T23:05:55,081 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/zookeeper_0, clientPort=65191, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T23:05:55,089 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=65191 2024-12-11T23:05:55,107 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:55,110 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:55,183 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:55,183 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:55,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:38408 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38408 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:05:55,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-11T23:05:55,641 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:05:55,659 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323 with version=8 2024-12-11T23:05:55,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/hbase-staging 2024-12-11T23:05:55,744 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-11T23:05:55,977 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d462fa545078:0 server-side Connection retries=45 2024-12-11T23:05:55,986 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:55,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:55,991 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T23:05:55,992 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:55,992 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T23:05:56,129 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T23:05:56,181 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-11T23:05:56,189 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-11T23:05:56,192 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T23:05:56,213 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 77488 (auto-detected) 2024-12-11T23:05:56,214 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-11T23:05:56,229 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32963 2024-12-11T23:05:56,246 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32963 connecting to ZooKeeper ensemble=127.0.0.1:65191 2024-12-11T23:05:56,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329630x0, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T23:05:56,393 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32963-0x1001720cf410000 connected 2024-12-11T23:05:56,485 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:56,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:56,499 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:05:56,502 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323, hbase.cluster.distributed=false 2024-12-11T23:05:56,523 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T23:05:56,526 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32963 2024-12-11T23:05:56,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32963 2024-12-11T23:05:56,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32963 2024-12-11T23:05:56,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32963 2024-12-11T23:05:56,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32963 2024-12-11T23:05:56,618 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d462fa545078:0 server-side Connection retries=45 2024-12-11T23:05:56,619 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:56,619 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:56,619 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T23:05:56,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:56,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T23:05:56,622 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T23:05:56,625 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T23:05:56,625 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41227 2024-12-11T23:05:56,628 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41227 connecting to ZooKeeper ensemble=127.0.0.1:65191 2024-12-11T23:05:56,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:56,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:56,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412270x0, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T23:05:56,641 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41227-0x1001720cf410001 connected 2024-12-11T23:05:56,641 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:05:56,646 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T23:05:56,652 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T23:05:56,655 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T23:05:56,660 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T23:05:56,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41227 2024-12-11T23:05:56,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=41227 2024-12-11T23:05:56,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41227 2024-12-11T23:05:56,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41227 2024-12-11T23:05:56,663 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41227 2024-12-11T23:05:56,676 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d462fa545078:0 server-side Connection retries=45 2024-12-11T23:05:56,676 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:56,676 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:56,677 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T23:05:56,677 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:56,677 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T23:05:56,677 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T23:05:56,678 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T23:05:56,678 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45367 2024-12-11T23:05:56,679 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45367 connecting to ZooKeeper ensemble=127.0.0.1:65191 2024-12-11T23:05:56,680 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:56,682 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:56,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:453670x0, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T23:05:56,694 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:05:56,694 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45367-0x1001720cf410002 connected 2024-12-11T23:05:56,694 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-11T23:05:56,695 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T23:05:56,696 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T23:05:56,698 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T23:05:56,699 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45367 2024-12-11T23:05:56,699 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45367 2024-12-11T23:05:56,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45367 2024-12-11T23:05:56,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45367 2024-12-11T23:05:56,702 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45367 2024-12-11T23:05:56,716 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d462fa545078:0 server-side Connection retries=45 2024-12-11T23:05:56,716 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:56,716 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:56,716 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T23:05:56,716 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:05:56,717 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T23:05:56,717 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T23:05:56,717 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T23:05:56,718 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39185 2024-12-11T23:05:56,719 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39185 connecting to ZooKeeper ensemble=127.0.0.1:65191 2024-12-11T23:05:56,720 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:56,722 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:56,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391850x0, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T23:05:56,736 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39185-0x1001720cf410003 connected 2024-12-11T23:05:56,736 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:391850x0, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:05:56,737 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T23:05:56,738 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T23:05:56,739 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T23:05:56,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T23:05:56,743 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39185 2024-12-11T23:05:56,743 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39185 2024-12-11T23:05:56,744 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39185 2024-12-11T23:05:56,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39185 2024-12-11T23:05:56,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39185 2024-12-11T23:05:56,762 DEBUG [M:0;d462fa545078:32963 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d462fa545078:32963 2024-12-11T23:05:56,763 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d462fa545078,32963,1733958355833 2024-12-11T23:05:56,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:05:56,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:05:56,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-11T23:05:56,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:05:56,780 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d462fa545078,32963,1733958355833 2024-12-11T23:05:56,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T23:05:56,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T23:05:56,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:56,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T23:05:56,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:56,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:56,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:56,810 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T23:05:56,811 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d462fa545078,32963,1733958355833 from backup master directory 2024-12-11T23:05:56,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:05:56,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:05:56,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-11T23:05:56,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d462fa545078,32963,1733958355833 2024-12-11T23:05:56,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:05:56,820 WARN [master/d462fa545078:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T23:05:56,820 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d462fa545078,32963,1733958355833 2024-12-11T23:05:56,822 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-11T23:05:56,824 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-11T23:05:56,878 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/hbase.id] with ID: 2cfb4b3d-a201-4ec9-81a9-bf161e3e0e7f 2024-12-11T23:05:56,879 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/.tmp/hbase.id 2024-12-11T23:05:56,885 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:56,885 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:56,888 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:38444 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38444 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:05:56,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-11T23:05:56,894 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:05:56,894 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/.tmp/hbase.id]:[hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/hbase.id] 2024-12-11T23:05:56,936 INFO [master/d462fa545078:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:05:56,940 INFO [master/d462fa545078:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T23:05:56,957 INFO [master/d462fa545078:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-11T23:05:56,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:56,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:56,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:56,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:56,979 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:56,979 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:56,982 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:38462 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38462 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:05:56,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-11T23:05:56,989 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T23:05:57,001 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:05:57,002 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T23:05:57,007 INFO [master/d462fa545078:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T23:05:57,030 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:57,031 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:57,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:43232 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:39927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43232 dst: /127.0.0.1:39927 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:05:57,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-11T23:05:57,039 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:05:57,054 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store 2024-12-11T23:05:57,071 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:57,071 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:57,074 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:39538 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39538 dst: /127.0.0.1:39435 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:05:57,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-11T23:05:57,080 WARN [master/d462fa545078:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:05:57,084 INFO [master/d462fa545078:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-11T23:05:57,087 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:05:57,088 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T23:05:57,088 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:05:57,089 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:05:57,090 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-11T23:05:57,090 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:05:57,090 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:05:57,092 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733958357088Disabling compacts and flushes for region at 1733958357088Disabling writes for close at 1733958357090 (+2 ms)Writing region close event to WAL at 1733958357090Closed at 1733958357090 2024-12-11T23:05:57,094 WARN [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/.initializing 2024-12-11T23:05:57,094 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/WALs/d462fa545078,32963,1733958355833 2024-12-11T23:05:57,104 INFO [master/d462fa545078:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T23:05:57,119 INFO [master/d462fa545078:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C32963%2C1733958355833, suffix=, logDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/WALs/d462fa545078,32963,1733958355833, archiveDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/oldWALs, maxLogs=10 2024-12-11T23:05:57,153 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/WALs/d462fa545078,32963,1733958355833/d462fa545078%2C32963%2C1733958355833.1733958357124, exclude list is [], retry=0 2024-12-11T23:05:57,171 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:05:57,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39435,DS-777ab5c3-f813-46c8-88e9-02c5afc6ab92,DISK] 2024-12-11T23:05:57,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39927,DS-606f49ca-f831-42b6-91d6-cdbc3802e5cd,DISK] 2024-12-11T23:05:57,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40001,DS-ed4c26c1-4caa-4506-a402-07b471ce2f54,DISK] 2024-12-11T23:05:57,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-11T23:05:57,209 INFO [master/d462fa545078:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/WALs/d462fa545078,32963,1733958355833/d462fa545078%2C32963%2C1733958355833.1733958357124 2024-12-11T23:05:57,210 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44037:44037),(127.0.0.1/127.0.0.1:34579:34579),(127.0.0.1/127.0.0.1:37119:37119)] 2024-12-11T23:05:57,210 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T23:05:57,210 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:05:57,213 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,214 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,245 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T23:05:57,268 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:57,270 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:05:57,271 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T23:05:57,274 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:57,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:05:57,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T23:05:57,278 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:57,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:05:57,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T23:05:57,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:57,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:05:57,282 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,285 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,286 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,291 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,292 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,296 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T23:05:57,299 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:05:57,305 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:05:57,306 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74597106, jitterRate=0.11158350110054016}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T23:05:57,313 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733958357224Initializing all the Stores at 1733958357226 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958357226Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958357227 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958357227Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958357227Cleaning up temporary data from old regions at 1733958357292 (+65 ms)Region opened successfully at 1733958357312 (+20 ms) 2024-12-11T23:05:57,314 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T23:05:57,343 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30342cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d462fa545078/172.17.0.2:0 2024-12-11T23:05:57,369 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T23:05:57,378 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T23:05:57,378 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T23:05:57,381 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T23:05:57,382 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T23:05:57,386 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-11T23:05:57,386 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T23:05:57,407 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T23:05:57,414 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T23:05:57,461 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T23:05:57,465 INFO [master/d462fa545078:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T23:05:57,467 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T23:05:57,471 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T23:05:57,474 INFO [master/d462fa545078:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T23:05:57,478 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T23:05:57,482 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T23:05:57,483 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T23:05:57,492 DEBUG [master/d462fa545078:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T23:05:57,511 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T23:05:57,524 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T23:05:57,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T23:05:57,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T23:05:57,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T23:05:57,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T23:05:57,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,540 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d462fa545078,32963,1733958355833, sessionid=0x1001720cf410000, setting cluster-up flag (Was=false) 2024-12-11T23:05:57,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-11T23:05:57,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,725 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T23:05:57,731 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d462fa545078,32963,1733958355833 2024-12-11T23:05:57,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-11T23:05:57,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-11T23:05:57,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:57,851 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T23:05:57,854 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d462fa545078,32963,1733958355833 2024-12-11T23:05:57,863 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T23:05:57,925 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T23:05:57,934 INFO [master/d462fa545078:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T23:05:57,939 INFO [master/d462fa545078:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, 
RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-11T23:05:57,944 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d462fa545078,32963,1733958355833 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T23:05:57,950 INFO [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(746): ClusterId : 2cfb4b3d-a201-4ec9-81a9-bf161e3e0e7f 2024-12-11T23:05:57,950 INFO [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(746): ClusterId : 2cfb4b3d-a201-4ec9-81a9-bf161e3e0e7f 2024-12-11T23:05:57,950 INFO [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(746): ClusterId : 2cfb4b3d-a201-4ec9-81a9-bf161e3e0e7f 2024-12-11T23:05:57,951 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d462fa545078:0, corePoolSize=5, maxPoolSize=5 2024-12-11T23:05:57,951 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d462fa545078:0, corePoolSize=5, maxPoolSize=5 2024-12-11T23:05:57,951 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d462fa545078:0, corePoolSize=5, maxPoolSize=5 2024-12-11T23:05:57,951 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d462fa545078:0, corePoolSize=5, maxPoolSize=5 2024-12-11T23:05:57,951 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d462fa545078:0, corePoolSize=10, maxPoolSize=10 2024-12-11T23:05:57,952 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:57,952 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d462fa545078:0, corePoolSize=2, maxPoolSize=2 2024-12-11T23:05:57,952 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:57,952 DEBUG [RS:0;d462fa545078:41227 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T23:05:57,952 DEBUG [RS:1;d462fa545078:45367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T23:05:57,952 DEBUG [RS:2;d462fa545078:39185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T23:05:57,957 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T23:05:57,957 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T23:05:57,961 INFO 
[master/d462fa545078:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733958387961 2024-12-11T23:05:57,962 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:57,963 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T23:05:57,963 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T23:05:57,964 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T23:05:57,967 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T23:05:57,968 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T23:05:57,968 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T23:05:57,968 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T23:05:57,968 DEBUG [RS:2;d462fa545078:39185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T23:05:57,968 DEBUG [RS:0;d462fa545078:41227 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T23:05:57,968 DEBUG [RS:1;d462fa545078:45367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 
2024-12-11T23:05:57,968 DEBUG [RS:2;d462fa545078:39185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T23:05:57,968 DEBUG [RS:0;d462fa545078:41227 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T23:05:57,968 DEBUG [RS:1;d462fa545078:45367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T23:05:57,970 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:57,972 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:57,972 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:57,977 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T23:05:57,978 DEBUG [RS:2;d462fa545078:39185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T23:05:57,978 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T23:05:57,978 DEBUG [RS:1;d462fa545078:45367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T23:05:57,978 DEBUG [RS:0;d462fa545078:41227 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T23:05:57,979 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T23:05:57,979 DEBUG [RS:0;d462fa545078:41227 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c332ac7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d462fa545078/172.17.0.2:0 2024-12-11T23:05:57,979 DEBUG [RS:1;d462fa545078:45367 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b51c3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d462fa545078/172.17.0.2:0 2024-12-11T23:05:57,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:43260 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:39927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43260 dst: /127.0.0.1:39927 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:05:57,981 DEBUG [RS:2;d462fa545078:39185 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c246245, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d462fa545078/172.17.0.2:0 2024-12-11T23:05:57,982 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T23:05:57,982 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T23:05:57,985 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d462fa545078:0:becomeActiveMaster-HFileCleaner.large.0-1733958357983,5,FailOnTimeoutGroup] 2024-12-11T23:05:57,992 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d462fa545078:0:becomeActiveMaster-HFileCleaner.small.0-1733958357985,5,FailOnTimeoutGroup] 2024-12-11T23:05:57,993 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:57,993 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T23:05:57,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-11T23:05:57,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:57,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-11T23:05:57,996 DEBUG [RS:1;d462fa545078:45367 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;d462fa545078:45367 2024-12-11T23:05:57,996 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:05:57,996 DEBUG [RS:0;d462fa545078:41227 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d462fa545078:41227 2024-12-11T23:05:57,997 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T23:05:57,998 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323 2024-12-11T23:05:58,000 DEBUG [RS:2;d462fa545078:39185 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;d462fa545078:39185 2024-12-11T23:05:58,000 INFO [RS:0;d462fa545078:41227 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T23:05:58,000 INFO [RS:1;d462fa545078:45367 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T23:05:58,000 INFO [RS:2;d462fa545078:39185 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T23:05:58,000 INFO [RS:1;d462fa545078:45367 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T23:05:58,000 INFO [RS:0;d462fa545078:41227 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T23:05:58,000 INFO [RS:2;d462fa545078:39185 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T23:05:58,000 DEBUG 
[RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T23:05:58,000 DEBUG [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T23:05:58,000 DEBUG [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T23:05:58,003 INFO [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(2659): reportForDuty to master=d462fa545078,32963,1733958355833 with port=45367, startcode=1733958356676 2024-12-11T23:05:58,003 INFO [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(2659): reportForDuty to master=d462fa545078,32963,1733958355833 with port=41227, startcode=1733958356589 2024-12-11T23:05:58,003 INFO [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(2659): reportForDuty to master=d462fa545078,32963,1733958355833 with port=39185, startcode=1733958356715 2024-12-11T23:05:58,010 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:58,010 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:58,015 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:43280 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:39927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43280 dst: /127.0.0.1:39927 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T23:05:58,015 DEBUG [RS:2;d462fa545078:39185 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T23:05:58,015 DEBUG [RS:0;d462fa545078:41227 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T23:05:58,015 DEBUG [RS:1;d462fa545078:45367 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T23:05:58,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-11T23:05:58,020 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:05:58,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:05:58,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T23:05:58,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T23:05:58,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:58,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:05:58,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T23:05:58,031 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T23:05:58,031 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:58,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:05:58,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T23:05:58,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T23:05:58,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:58,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:05:58,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T23:05:58,043 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T23:05:58,043 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:58,045 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:05:58,045 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T23:05:58,046 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740 2024-12-11T23:05:58,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740 2024-12-11T23:05:58,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T23:05:58,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T23:05:58,053 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52837, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T23:05:58,053 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41011, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T23:05:58,053 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T23:05:58,053 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38245, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T23:05:58,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T23:05:58,059 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32963 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d462fa545078,45367,1733958356676 2024-12-11T23:05:58,061 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32963 {}] master.ServerManager(517): Registering regionserver=d462fa545078,45367,1733958356676 2024-12-11T23:05:58,067 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:05:58,068 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64715992, jitterRate=-0.03565657138824463}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T23:05:58,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733958358021Initializing all the Stores at 1733958358023 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'} at 1733958358023Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958358024 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958358024Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958358024Cleaning up temporary data from old regions at 1733958358052 (+28 ms)Region opened successfully at 1733958358070 (+18 ms) 2024-12-11T23:05:58,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T23:05:58,070 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T23:05:58,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T23:05:58,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T23:05:58,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T23:05:58,072 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T23:05:58,072 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32963 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d462fa545078,41227,1733958356589 2024-12-11T23:05:58,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733958358070Disabling compacts and flushes for region at 1733958358070Disabling writes for close at 1733958358071 (+1 ms)Writing region close event to WAL at 1733958358072 (+1 ms)Closed at 1733958358072 2024-12-11T23:05:58,072 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32963 {}] master.ServerManager(517): Registering regionserver=d462fa545078,41227,1733958356589 2024-12-11T23:05:58,076 DEBUG [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323 2024-12-11T23:05:58,076 DEBUG [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39211 2024-12-11T23:05:58,076 DEBUG [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T23:05:58,077 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T23:05:58,077 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going 
to assign meta 2024-12-11T23:05:58,077 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32963 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d462fa545078,39185,1733958356715 2024-12-11T23:05:58,078 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32963 {}] master.ServerManager(517): Registering regionserver=d462fa545078,39185,1733958356715 2024-12-11T23:05:58,078 DEBUG [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323 2024-12-11T23:05:58,078 DEBUG [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39211 2024-12-11T23:05:58,078 DEBUG [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T23:05:58,081 DEBUG [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323 2024-12-11T23:05:58,081 DEBUG [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39211 2024-12-11T23:05:58,082 DEBUG [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T23:05:58,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T23:05:58,093 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T23:05:58,096 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T23:05:58,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:05:58,166 DEBUG [RS:0;d462fa545078:41227 {}] zookeeper.ZKUtil(111): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d462fa545078,41227,1733958356589 2024-12-11T23:05:58,166 DEBUG [RS:1;d462fa545078:45367 {}] zookeeper.ZKUtil(111): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d462fa545078,45367,1733958356676 2024-12-11T23:05:58,167 WARN [RS:1;d462fa545078:45367 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T23:05:58,167 WARN [RS:0;d462fa545078:41227 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T23:05:58,167 INFO [RS:0;d462fa545078:41227 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T23:05:58,167 INFO [RS:1;d462fa545078:45367 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T23:05:58,167 DEBUG [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,41227,1733958356589 2024-12-11T23:05:58,167 DEBUG [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,45367,1733958356676 2024-12-11T23:05:58,167 DEBUG [RS:2;d462fa545078:39185 {}] zookeeper.ZKUtil(111): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d462fa545078,39185,1733958356715 2024-12-11T23:05:58,167 WARN [RS:2;d462fa545078:39185 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T23:05:58,167 INFO [RS:2;d462fa545078:39185 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T23:05:58,168 DEBUG [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,39185,1733958356715 2024-12-11T23:05:58,169 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d462fa545078,39185,1733958356715] 2024-12-11T23:05:58,169 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d462fa545078,41227,1733958356589] 2024-12-11T23:05:58,169 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d462fa545078,45367,1733958356676] 2024-12-11T23:05:58,194 INFO [RS:0;d462fa545078:41227 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T23:05:58,194 INFO [RS:1;d462fa545078:45367 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T23:05:58,194 INFO [RS:2;d462fa545078:39185 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T23:05:58,207 INFO [RS:2;d462fa545078:39185 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T23:05:58,207 INFO [RS:0;d462fa545078:41227 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T23:05:58,207 INFO [RS:1;d462fa545078:45367 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T23:05:58,212 INFO [RS:1;d462fa545078:45367 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T23:05:58,212 INFO [RS:2;d462fa545078:39185 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 
MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T23:05:58,212 INFO [RS:0;d462fa545078:41227 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T23:05:58,212 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,212 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,213 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,214 INFO [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T23:05:58,214 INFO [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T23:05:58,214 INFO [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T23:05:58,219 INFO [RS:2;d462fa545078:39185 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T23:05:58,219 INFO [RS:1;d462fa545078:45367 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T23:05:58,219 INFO [RS:0;d462fa545078:41227 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T23:05:58,221 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,221 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,221 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T23:05:58,221 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,221 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d462fa545078:0, corePoolSize=2, maxPoolSize=2 2024-12-11T23:05:58,221 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 
2024-12-11T23:05:58,222 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d462fa545078:0, corePoolSize=2, maxPoolSize=2 2024-12-11T23:05:58,222 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d462fa545078:0, corePoolSize=2, maxPoolSize=2 2024-12-11T23:05:58,222 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:05:58,222 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:2;d462fa545078:39185 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:05:58,222 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting 
executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,222 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,223 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:05:58,223 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:05:58,223 DEBUG [RS:1;d462fa545078:45367 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:05:58,223 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:05:58,223 DEBUG [RS:0;d462fa545078:41227 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:05:58,223 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,223 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,223 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,223 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,224 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,224 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,224 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,224 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,224 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,39185,1733958356715-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T23:05:58,224 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,224 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,224 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,45367,1733958356676-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-11T23:05:58,225 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,225 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,225 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,225 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,225 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,225 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,41227,1733958356589-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T23:05:58,247 WARN [d462fa545078:32963 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-11T23:05:58,248 INFO [RS:0;d462fa545078:41227 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T23:05:58,248 INFO [RS:1;d462fa545078:45367 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T23:05:58,248 INFO [RS:2;d462fa545078:39185 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T23:05:58,251 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,41227,1733958356589-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,251 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,39185,1733958356715-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,251 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,45367,1733958356676-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,251 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,251 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,251 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,251 INFO [RS:0;d462fa545078:41227 {}] regionserver.Replication(171): d462fa545078,41227,1733958356589 started 2024-12-11T23:05:58,251 INFO [RS:2;d462fa545078:39185 {}] regionserver.Replication(171): d462fa545078,39185,1733958356715 started 2024-12-11T23:05:58,251 INFO [RS:1;d462fa545078:45367 {}] regionserver.Replication(171): d462fa545078,45367,1733958356676 started 2024-12-11T23:05:58,267 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T23:05:58,268 INFO [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(1482): Serving as d462fa545078,41227,1733958356589, RpcServer on d462fa545078/172.17.0.2:41227, sessionid=0x1001720cf410001 2024-12-11T23:05:58,268 DEBUG [RS:0;d462fa545078:41227 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T23:05:58,269 DEBUG [RS:0;d462fa545078:41227 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d462fa545078,41227,1733958356589 2024-12-11T23:05:58,269 DEBUG [RS:0;d462fa545078:41227 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,41227,1733958356589' 2024-12-11T23:05:58,269 DEBUG [RS:0;d462fa545078:41227 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T23:05:58,269 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,269 INFO [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(1482): Serving as d462fa545078,39185,1733958356715, RpcServer on d462fa545078/172.17.0.2:39185, sessionid=0x1001720cf410003 2024-12-11T23:05:58,269 DEBUG [RS:2;d462fa545078:39185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T23:05:58,269 DEBUG [RS:2;d462fa545078:39185 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d462fa545078,39185,1733958356715 2024-12-11T23:05:58,269 DEBUG [RS:2;d462fa545078:39185 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,39185,1733958356715' 2024-12-11T23:05:58,270 DEBUG [RS:2;d462fa545078:39185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T23:05:58,270 DEBUG [RS:0;d462fa545078:41227 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T23:05:58,270 DEBUG [RS:2;d462fa545078:39185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T23:05:58,270 DEBUG [RS:0;d462fa545078:41227 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T23:05:58,270 DEBUG [RS:0;d462fa545078:41227 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T23:05:58,271 DEBUG [RS:0;d462fa545078:41227 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d462fa545078,41227,1733958356589 2024-12-11T23:05:58,271 DEBUG [RS:0;d462fa545078:41227 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,41227,1733958356589' 2024-12-11T23:05:58,271 DEBUG [RS:2;d462fa545078:39185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T23:05:58,271 DEBUG [RS:0;d462fa545078:41227 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T23:05:58,271 DEBUG [RS:2;d462fa545078:39185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T23:05:58,271 DEBUG [RS:2;d462fa545078:39185 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d462fa545078,39185,1733958356715 2024-12-11T23:05:58,271 DEBUG [RS:2;d462fa545078:39185 {}] 
procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,39185,1733958356715' 2024-12-11T23:05:58,271 DEBUG [RS:2;d462fa545078:39185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T23:05:58,271 DEBUG [RS:0;d462fa545078:41227 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T23:05:58,271 DEBUG [RS:2;d462fa545078:39185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T23:05:58,272 DEBUG [RS:0;d462fa545078:41227 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T23:05:58,272 INFO [RS:0;d462fa545078:41227 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T23:05:58,272 DEBUG [RS:2;d462fa545078:39185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T23:05:58,272 INFO [RS:0;d462fa545078:41227 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T23:05:58,272 INFO [RS:2;d462fa545078:39185 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T23:05:58,272 INFO [RS:2;d462fa545078:39185 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T23:05:58,274 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:58,274 INFO [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1482): Serving as d462fa545078,45367,1733958356676, RpcServer on d462fa545078/172.17.0.2:45367, sessionid=0x1001720cf410002 2024-12-11T23:05:58,274 DEBUG [RS:1;d462fa545078:45367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T23:05:58,274 DEBUG [RS:1;d462fa545078:45367 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d462fa545078,45367,1733958356676 2024-12-11T23:05:58,274 DEBUG [RS:1;d462fa545078:45367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,45367,1733958356676' 2024-12-11T23:05:58,274 DEBUG [RS:1;d462fa545078:45367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T23:05:58,275 DEBUG [RS:1;d462fa545078:45367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T23:05:58,275 DEBUG [RS:1;d462fa545078:45367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T23:05:58,275 DEBUG [RS:1;d462fa545078:45367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T23:05:58,276 DEBUG [RS:1;d462fa545078:45367 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d462fa545078,45367,1733958356676 2024-12-11T23:05:58,276 DEBUG [RS:1;d462fa545078:45367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,45367,1733958356676' 2024-12-11T23:05:58,276 DEBUG [RS:1;d462fa545078:45367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T23:05:58,276 DEBUG [RS:1;d462fa545078:45367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new 
procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T23:05:58,277 DEBUG [RS:1;d462fa545078:45367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T23:05:58,277 INFO [RS:1;d462fa545078:45367 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T23:05:58,277 INFO [RS:1;d462fa545078:45367 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T23:05:58,378 INFO [RS:1;d462fa545078:45367 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T23:05:58,378 INFO [RS:2;d462fa545078:39185 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T23:05:58,379 INFO [RS:0;d462fa545078:41227 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T23:05:58,383 INFO [RS:2;d462fa545078:39185 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C39185%2C1733958356715, suffix=, logDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,39185,1733958356715, archiveDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/oldWALs, maxLogs=32 2024-12-11T23:05:58,383 INFO [RS:1;d462fa545078:45367 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C45367%2C1733958356676, suffix=, logDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,45367,1733958356676, archiveDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/oldWALs, maxLogs=32 2024-12-11T23:05:58,383 INFO [RS:0;d462fa545078:41227 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C41227%2C1733958356589, suffix=, logDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,41227,1733958356589, archiveDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/oldWALs, maxLogs=32 2024-12-11T23:05:58,397 DEBUG [RS:2;d462fa545078:39185 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,39185,1733958356715/d462fa545078%2C39185%2C1733958356715.1733958358386, exclude list is [], retry=0 2024-12-11T23:05:58,397 DEBUG [RS:1;d462fa545078:45367 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,45367,1733958356676/d462fa545078%2C45367%2C1733958356676.1733958358386, exclude list is [], retry=0 2024-12-11T23:05:58,400 DEBUG [RS:0;d462fa545078:41227 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,41227,1733958356589/d462fa545078%2C41227%2C1733958356589.1733958358386, exclude list is [], retry=0 2024-12-11T23:05:58,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39927,DS-606f49ca-f831-42b6-91d6-cdbc3802e5cd,DISK] 2024-12-11T23:05:58,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40001,DS-ed4c26c1-4caa-4506-a402-07b471ce2f54,DISK] 2024-12-11T23:05:58,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39927,DS-606f49ca-f831-42b6-91d6-cdbc3802e5cd,DISK] 2024-12-11T23:05:58,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39435,DS-777ab5c3-f813-46c8-88e9-02c5afc6ab92,DISK] 2024-12-11T23:05:58,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39435,DS-777ab5c3-f813-46c8-88e9-02c5afc6ab92,DISK] 2024-12-11T23:05:58,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39435,DS-777ab5c3-f813-46c8-88e9-02c5afc6ab92,DISK] 2024-12-11T23:05:58,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39927,DS-606f49ca-f831-42b6-91d6-cdbc3802e5cd,DISK] 2024-12-11T23:05:58,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40001,DS-ed4c26c1-4caa-4506-a402-07b471ce2f54,DISK] 2024-12-11T23:05:58,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40001,DS-ed4c26c1-4caa-4506-a402-07b471ce2f54,DISK] 2024-12-11T23:05:58,424 INFO [RS:0;d462fa545078:41227 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,41227,1733958356589/d462fa545078%2C41227%2C1733958356589.1733958358386 2024-12-11T23:05:58,425 INFO [RS:2;d462fa545078:39185 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,39185,1733958356715/d462fa545078%2C39185%2C1733958356715.1733958358386 2024-12-11T23:05:58,425 DEBUG [RS:0;d462fa545078:41227 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44037:44037),(127.0.0.1/127.0.0.1:34579:34579),(127.0.0.1/127.0.0.1:37119:37119)] 2024-12-11T23:05:58,426 DEBUG [RS:2;d462fa545078:39185 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44037:44037),(127.0.0.1/127.0.0.1:34579:34579),(127.0.0.1/127.0.0.1:37119:37119)] 2024-12-11T23:05:58,434 INFO [RS:1;d462fa545078:45367 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,45367,1733958356676/d462fa545078%2C45367%2C1733958356676.1733958358386 2024-12-11T23:05:58,435 DEBUG [RS:1;d462fa545078:45367 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44037:44037),(127.0.0.1/127.0.0.1:34579:34579),(127.0.0.1/127.0.0.1:37119:37119)] 2024-12-11T23:05:58,501 DEBUG [d462fa545078:32963 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T23:05:58,512 DEBUG [d462fa545078:32963 {}] balancer.BalancerClusterState(204): Hosts are {d462fa545078=0} racks are {/default-rack=0} 2024-12-11T23:05:58,518 DEBUG [d462fa545078:32963 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:05:58,518 DEBUG [d462fa545078:32963 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:05:58,519 DEBUG [d462fa545078:32963 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:05:58,519 DEBUG [d462fa545078:32963 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:05:58,519 DEBUG [d462fa545078:32963 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:05:58,519 DEBUG [d462fa545078:32963 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:05:58,519 INFO [d462fa545078:32963 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:05:58,519 INFO [d462fa545078:32963 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:05:58,519 INFO [d462fa545078:32963 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:05:58,519 DEBUG [d462fa545078:32963 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:05:58,526 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d462fa545078,45367,1733958356676 2024-12-11T23:05:58,532 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d462fa545078,45367,1733958356676, state=OPENING 2024-12-11T23:05:58,587 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T23:05:58,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:58,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:58,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:58,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:05:58,601 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:05:58,601 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:05:58,602 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:05:58,602 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:05:58,606 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T23:05:58,609 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d462fa545078,45367,1733958356676}] 2024-12-11T23:05:58,784 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T23:05:58,787 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36653, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T23:05:58,798 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T23:05:58,798 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T23:05:58,799 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T23:05:58,802 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C45367%2C1733958356676.meta, suffix=.meta, logDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,45367,1733958356676, archiveDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/oldWALs, maxLogs=32 2024-12-11T23:05:58,817 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,45367,1733958356676/d462fa545078%2C45367%2C1733958356676.meta.1733958358804.meta, exclude list is [], retry=0 2024-12-11T23:05:58,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40001,DS-ed4c26c1-4caa-4506-a402-07b471ce2f54,DISK] 2024-12-11T23:05:58,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39927,DS-606f49ca-f831-42b6-91d6-cdbc3802e5cd,DISK] 2024-12-11T23:05:58,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39435,DS-777ab5c3-f813-46c8-88e9-02c5afc6ab92,DISK] 2024-12-11T23:05:58,824 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/WALs/d462fa545078,45367,1733958356676/d462fa545078%2C45367%2C1733958356676.meta.1733958358804.meta 2024-12-11T23:05:58,825 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37119:37119),(127.0.0.1/127.0.0.1:44037:44037),(127.0.0.1/127.0.0.1:34579:34579)] 2024-12-11T23:05:58,825 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T23:05:58,827 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T23:05:58,830 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T23:05:58,836 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-11T23:05:58,840 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T23:05:58,841 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:05:58,841 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T23:05:58,841 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T23:05:58,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T23:05:58,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T23:05:58,846 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:58,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:05:58,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T23:05:58,849 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T23:05:58,849 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:58,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:05:58,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T23:05:58,851 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T23:05:58,851 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:58,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:05:58,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T23:05:58,853 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T23:05:58,853 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:58,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-11T23:05:58,854 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T23:05:58,856 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740 2024-12-11T23:05:58,859 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740 2024-12-11T23:05:58,861 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T23:05:58,861 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T23:05:58,862 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T23:05:58,865 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T23:05:58,866 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73265336, jitterRate=0.09173858165740967}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T23:05:58,866 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T23:05:58,868 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733958358841Writing region info on filesystem at 1733958358842 (+1 ms)Initializing all the Stores at 1733958358844 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958358844Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958358844Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958358844Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958358844Cleaning up temporary data from old regions at 1733958358861 (+17 ms)Running coprocessor post-open hooks at 1733958358866 (+5 ms)Region opened successfully at 1733958358868 (+2 ms) 2024-12-11T23:05:58,874 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733958358776 2024-12-11T23:05:58,884 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T23:05:58,884 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T23:05:58,885 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d462fa545078,45367,1733958356676 2024-12-11T23:05:58,887 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d462fa545078,45367,1733958356676, state=OPEN 2024-12-11T23:05:58,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T23:05:58,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T23:05:58,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T23:05:58,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T23:05:58,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:05:58,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:05:58,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:05:58,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:05:58,914 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=d462fa545078,45367,1733958356676 2024-12-11T23:05:58,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T23:05:58,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d462fa545078,45367,1733958356676 in 305 msec 2024-12-11T23:05:58,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T23:05:58,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 840 msec 2024-12-11T23:05:58,931 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T23:05:58,931 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T23:05:58,948 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:05:58,949 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d462fa545078,45367,1733958356676, seqNum=-1] 2024-12-11T23:05:58,986 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:05:58,988 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53077, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:05:59,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1170 sec 2024-12-11T23:05:59,009 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733958359009, completionTime=-1 2024-12-11T23:05:59,016 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T23:05:59,016 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-11T23:05:59,043 INFO [master/d462fa545078:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T23:05:59,043 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733958419043 2024-12-11T23:05:59,043 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733958479043 2024-12-11T23:05:59,044 INFO [master/d462fa545078:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-11T23:05:59,045 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T23:05:59,051 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,32963,1733958355833-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:59,052 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,32963,1733958355833-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:59,052 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,32963,1733958355833-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:59,053 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d462fa545078:32963, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:59,054 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:59,054 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:59,060 DEBUG [master/d462fa545078:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T23:05:59,084 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.264sec 2024-12-11T23:05:59,085 INFO [master/d462fa545078:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T23:05:59,086 INFO [master/d462fa545078:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T23:05:59,087 INFO [master/d462fa545078:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T23:05:59,087 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-11T23:05:59,087 INFO [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T23:05:59,088 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,32963,1733958355833-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T23:05:59,088 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,32963,1733958355833-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T23:05:59,093 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T23:05:59,094 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T23:05:59,094 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,32963,1733958355833-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:05:59,159 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15b7d5d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:05:59,164 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T23:05:59,164 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T23:05:59,168 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d462fa545078,32963,-1 for getting cluster id 2024-12-11T23:05:59,171 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:05:59,179 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2cfb4b3d-a201-4ec9-81a9-bf161e3e0e7f' 2024-12-11T23:05:59,181 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:05:59,181 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2cfb4b3d-a201-4ec9-81a9-bf161e3e0e7f" 2024-12-11T23:05:59,182 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4921f8f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:05:59,182 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d462fa545078,32963,-1] 2024-12-11T23:05:59,184 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:05:59,186 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:05:59,187 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42504, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-11T23:05:59,190 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30985e89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:05:59,190 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:05:59,197 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d462fa545078,45367,1733958356676, seqNum=-1] 2024-12-11T23:05:59,198 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:05:59,200 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40630, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:05:59,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d462fa545078,32963,1733958355833 2024-12-11T23:05:59,222 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T23:05:59,227 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is d462fa545078,32963,1733958355833 2024-12-11T23:05:59,229 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@33778af6 2024-12-11T23:05:59,229 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T23:05:59,232 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42514, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T23:05:59,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:05:59,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T23:05:59,248 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T23:05:59,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-11T23:05:59,251 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:59,254 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T23:05:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T23:05:59,263 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:59,263 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:59,266 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:38536 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38536 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:05:59,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-11T23:05:59,270 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T23:05:59,273 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1ea9365960e4551483f051ffe209e83c, NAME => 'TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323 2024-12-11T23:05:59,279 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:59,279 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:05:59,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:38550 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38550 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:05:59,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-11T23:05:59,286 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T23:05:59,287 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:05:59,287 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 1ea9365960e4551483f051ffe209e83c, disabling compactions & flushes 2024-12-11T23:05:59,287 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:05:59,287 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:05:59,287 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. after waiting 0 ms 2024-12-11T23:05:59,287 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:05:59,287 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:05:59,287 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1ea9365960e4551483f051ffe209e83c: Waiting for close lock at 1733958359287Disabling compacts and flushes for region at 1733958359287Disabling writes for close at 1733958359287Writing region close event to WAL at 1733958359287Closed at 1733958359287 2024-12-11T23:05:59,289 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T23:05:59,293 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733958359289"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958359289"}]},"ts":"1733958359289"} 2024-12-11T23:05:59,299 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-11T23:05:59,301 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T23:05:59,304 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958359301"}]},"ts":"1733958359301"} 2024-12-11T23:05:59,308 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T23:05:59,309 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {d462fa545078=0} racks are {/default-rack=0} 2024-12-11T23:05:59,310 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:05:59,310 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:05:59,310 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:05:59,310 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:05:59,310 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:05:59,310 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:05:59,310 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:05:59,310 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:05:59,310 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:05:59,310 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:05:59,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1ea9365960e4551483f051ffe209e83c, ASSIGN}] 2024-12-11T23:05:59,314 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1ea9365960e4551483f051ffe209e83c, ASSIGN 2024-12-11T23:05:59,316 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1ea9365960e4551483f051ffe209e83c, ASSIGN; state=OFFLINE, location=d462fa545078,39185,1733958356715; forceNewPlan=false, retain=false 2024-12-11T23:05:59,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T23:05:59,470 INFO [d462fa545078:32963 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-11T23:05:59,472 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1ea9365960e4551483f051ffe209e83c, regionState=OPENING, regionLocation=d462fa545078,39185,1733958356715 2024-12-11T23:05:59,479 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1ea9365960e4551483f051ffe209e83c, ASSIGN because future has completed 2024-12-11T23:05:59,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ea9365960e4551483f051ffe209e83c, server=d462fa545078,39185,1733958356715}] 2024-12-11T23:05:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T23:05:59,636 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T23:05:59,638 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47687, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T23:05:59,643 INFO [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:05:59,643 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1ea9365960e4551483f051ffe209e83c, NAME => 'TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c.', STARTKEY => '', ENDKEY => ''} 2024-12-11T23:05:59,644 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,644 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:05:59,644 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,644 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,646 INFO [StoreOpener-1ea9365960e4551483f051ffe209e83c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,649 INFO [StoreOpener-1ea9365960e4551483f051ffe209e83c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ea9365960e4551483f051ffe209e83c columnFamilyName cf 2024-12-11T23:05:59,649 DEBUG [StoreOpener-1ea9365960e4551483f051ffe209e83c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:05:59,650 INFO [StoreOpener-1ea9365960e4551483f051ffe209e83c-1 {}] regionserver.HStore(327): Store=1ea9365960e4551483f051ffe209e83c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:05:59,650 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,651 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/default/TestHBaseWalOnEC/1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,652 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/default/TestHBaseWalOnEC/1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,652 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,652 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,655 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,659 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/default/TestHBaseWalOnEC/1ea9365960e4551483f051ffe209e83c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:05:59,660 INFO [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1ea9365960e4551483f051ffe209e83c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69754205, jitterRate=0.03941865265369415}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:05:59,660 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:05:59,661 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1ea9365960e4551483f051ffe209e83c: Running coprocessor pre-open hook at 1733958359644Writing region info on filesystem at 1733958359644Initializing all the Stores at 1733958359646 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958359646Cleaning up temporary data from old regions at 1733958359652 (+6 ms)Running coprocessor post-open hooks at 1733958359660 (+8 ms)Region opened successfully at 1733958359661 (+1 ms) 2024-12-11T23:05:59,663 INFO [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c., pid=6, masterSystemTime=1733958359635 2024-12-11T23:05:59,666 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:05:59,666 INFO [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:05:59,667 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1ea9365960e4551483f051ffe209e83c, regionState=OPEN, openSeqNum=2, regionLocation=d462fa545078,39185,1733958356715 2024-12-11T23:05:59,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ea9365960e4551483f051ffe209e83c, server=d462fa545078,39185,1733958356715 because future has completed 2024-12-11T23:05:59,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T23:05:59,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1ea9365960e4551483f051ffe209e83c, server=d462fa545078,39185,1733958356715 in 193 msec 2024-12-11T23:05:59,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T23:05:59,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1ea9365960e4551483f051ffe209e83c, ASSIGN in 366 msec 2024-12-11T23:05:59,681 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T23:05:59,682 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958359682"}]},"ts":"1733958359682"} 2024-12-11T23:05:59,685 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T23:05:59,686 INFO [PEWorker-2 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T23:05:59,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 446 msec 2024-12-11T23:05:59,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T23:05:59,885 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T23:05:59,885 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T23:05:59,887 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:05:59,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-11T23:05:59,894 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:05:59,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-11T23:05:59,906 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c., hostname=d462fa545078,39185,1733958356715, seqNum=2] 2024-12-11T23:05:59,907 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:05:59,909 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:05:59,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T23:05:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T23:05:59,926 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T23:05:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T23:05:59,928 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T23:05:59,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T23:06:00,035 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T23:06:00,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39185 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T23:06:00,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:06:00,100 INFO [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 1ea9365960e4551483f051ffe209e83c 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T23:06:00,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/default/TestHBaseWalOnEC/1ea9365960e4551483f051ffe209e83c/.tmp/cf/a29641eb39db448088bc258f2661f126 is 36, key is row/cf:cq/1733958359910/Put/seqid=0 2024-12-11T23:06:00,153 WARN [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:00,153 WARN [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:00,156 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676219600_22 at /127.0.0.1:47280 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:39927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47280 dst: /127.0.0.1:39927 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:06:00,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-11T23:06:00,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T23:06:00,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T23:06:00,562 WARN [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:06:00,563 INFO [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/default/TestHBaseWalOnEC/1ea9365960e4551483f051ffe209e83c/.tmp/cf/a29641eb39db448088bc258f2661f126 2024-12-11T23:06:00,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/default/TestHBaseWalOnEC/1ea9365960e4551483f051ffe209e83c/.tmp/cf/a29641eb39db448088bc258f2661f126 as hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/default/TestHBaseWalOnEC/1ea9365960e4551483f051ffe209e83c/cf/a29641eb39db448088bc258f2661f126 2024-12-11T23:06:00,615 INFO [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/default/TestHBaseWalOnEC/1ea9365960e4551483f051ffe209e83c/cf/a29641eb39db448088bc258f2661f126, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T23:06:00,622 INFO [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 1ea9365960e4551483f051ffe209e83c in 523ms, sequenceid=5, compaction requested=false 2024-12-11T23:06:00,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-11T23:06:00,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 1ea9365960e4551483f051ffe209e83c: 2024-12-11T23:06:00,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 
2024-12-11T23:06:00,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T23:06:00,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T23:06:00,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T23:06:00,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 700 msec 2024-12-11T23:06:00,637 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 715 msec 2024-12-11T23:06:00,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-11T23:06:00,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-11T23:06:00,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-11T23:06:00,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-11T23:06:00,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-11T23:06:00,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-11T23:06:00,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-11T23:06:00,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-11T23:06:00,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-11T23:06:00,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-11T23:06:00,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-11T23:06:00,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-11T23:06:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T23:06:01,064 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T23:06:01,077 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T23:06:01,078 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T23:06:01,078 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:01,081 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:01,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:01,082 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:06:01,082 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T23:06:01,082 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1328198881, stopped=false 2024-12-11T23:06:01,083 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d462fa545078,32963,1733958355833 2024-12-11T23:06:01,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:01,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:01,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:01,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:01,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:01,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:01,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:01,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:01,135 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T23:06:01,137 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-11T23:06:01,137 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:01,137 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:01,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:01,138 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:01,138 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:01,139 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:01,139 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd462fa545078,41227,1733958356589' ***** 2024-12-11T23:06:01,139 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T23:06:01,139 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd462fa545078,45367,1733958356676' ***** 2024-12-11T23:06:01,139 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T23:06:01,139 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd462fa545078,39185,1733958356715' ***** 2024-12-11T23:06:01,139 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T23:06:01,140 INFO [RS:0;d462fa545078:41227 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T23:06:01,140 INFO [RS:0;d462fa545078:41227 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T23:06:01,140 INFO [RS:2;d462fa545078:39185 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T23:06:01,140 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T23:06:01,140 INFO [RS:2;d462fa545078:39185 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T23:06:01,140 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T23:06:01,140 INFO [RS:0;d462fa545078:41227 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T23:06:01,141 INFO [RS:2;d462fa545078:39185 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-11T23:06:01,141 INFO [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(959): stopping server d462fa545078,41227,1733958356589 2024-12-11T23:06:01,141 INFO [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(3091): Received CLOSE for 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:06:01,141 INFO [RS:0;d462fa545078:41227 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:06:01,141 INFO [RS:0;d462fa545078:41227 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d462fa545078:41227. 2024-12-11T23:06:01,141 INFO [RS:1;d462fa545078:45367 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T23:06:01,141 INFO [RS:1;d462fa545078:45367 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T23:06:01,141 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T23:06:01,141 DEBUG [RS:0;d462fa545078:41227 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:01,141 INFO [RS:1;d462fa545078:45367 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T23:06:01,141 DEBUG [RS:0;d462fa545078:41227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:01,141 INFO [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(959): stopping server d462fa545078,45367,1733958356676 2024-12-11T23:06:01,141 INFO [RS:1;d462fa545078:45367 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:06:01,141 INFO [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(959): stopping server d462fa545078,39185,1733958356715 2024-12-11T23:06:01,142 INFO [RS:1;d462fa545078:45367 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;d462fa545078:45367. 
2024-12-11T23:06:01,142 INFO [RS:2;d462fa545078:39185 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:06:01,142 DEBUG [RS:1;d462fa545078:45367 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:01,142 INFO [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(976): stopping server d462fa545078,41227,1733958356589; all regions closed. 2024-12-11T23:06:01,142 DEBUG [RS:1;d462fa545078:45367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:01,142 INFO [RS:2;d462fa545078:39185 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;d462fa545078:39185. 
2024-12-11T23:06:01,142 DEBUG [RS:2;d462fa545078:39185 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:01,142 DEBUG [RS:2;d462fa545078:39185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:01,142 INFO [RS:1;d462fa545078:45367 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T23:06:01,142 INFO [RS:1;d462fa545078:45367 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T23:06:01,142 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1ea9365960e4551483f051ffe209e83c, disabling compactions & flushes 2024-12-11T23:06:01,142 INFO [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:06:01,142 INFO [RS:1;d462fa545078:45367 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T23:06:01,142 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:06:01,142 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. after waiting 0 ms 2024-12-11T23:06:01,142 INFO [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T23:06:01,142 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 
2024-12-11T23:06:01,143 INFO [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T23:06:01,143 DEBUG [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T23:06:01,143 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T23:06:01,143 INFO [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T23:06:01,143 DEBUG [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(1325): Online Regions={1ea9365960e4551483f051ffe209e83c=TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c.} 2024-12-11T23:06:01,143 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T23:06:01,143 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T23:06:01,144 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T23:06:01,144 DEBUG [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T23:06:01,144 DEBUG [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(1351): Waiting on 1ea9365960e4551483f051ffe209e83c 2024-12-11T23:06:01,144 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T23:06:01,144 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-11T23:06:01,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741828_1018 (size=93) 2024-12-11T23:06:01,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_1073741828_1018 (size=93) 2024-12-11T23:06:01,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741828_1018 (size=93) 2024-12-11T23:06:01,156 DEBUG [RS:0;d462fa545078:41227 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/oldWALs 2024-12-11T23:06:01,156 INFO [RS:0;d462fa545078:41227 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL d462fa545078%2C41227%2C1733958356589:(num 1733958358386) 2024-12-11T23:06:01,156 DEBUG [RS:0;d462fa545078:41227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:01,156 INFO [RS:0;d462fa545078:41227 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:01,156 INFO [RS:0;d462fa545078:41227 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:06:01,157 INFO [RS:0;d462fa545078:41227 {}] hbase.ChoreService(370): Chore service for: regionserver/d462fa545078:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, 
unit=MILLISECONDS] on shutdown 2024-12-11T23:06:01,157 INFO [RS:0;d462fa545078:41227 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T23:06:01,157 INFO [RS:0;d462fa545078:41227 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T23:06:01,157 INFO [regionserver/d462fa545078:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T23:06:01,157 INFO [RS:0;d462fa545078:41227 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T23:06:01,157 INFO [RS:0;d462fa545078:41227 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:06:01,157 INFO [RS:0;d462fa545078:41227 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41227 2024-12-11T23:06:01,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d462fa545078,41227,1733958356589 2024-12-11T23:06:01,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:06:01,166 INFO [RS:0;d462fa545078:41227 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:06:01,167 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d462fa545078,41227,1733958356589] 2024-12-11T23:06:01,167 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/default/TestHBaseWalOnEC/1ea9365960e4551483f051ffe209e83c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T23:06:01,169 INFO [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 2024-12-11T23:06:01,169 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1ea9365960e4551483f051ffe209e83c: Waiting for close lock at 1733958361142Running coprocessor pre-close hooks at 1733958361142Disabling compacts and flushes for region at 1733958361142Disabling writes for close at 1733958361142Writing region close event to WAL at 1733958361144 (+2 ms)Running coprocessor post-close hooks at 1733958361168 (+24 ms)Closed at 1733958361169 (+1 ms) 2024-12-11T23:06:01,170 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c. 
2024-12-11T23:06:01,187 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d462fa545078,41227,1733958356589 already deleted, retry=false 2024-12-11T23:06:01,187 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d462fa545078,41227,1733958356589 expired; onlineServers=2 2024-12-11T23:06:01,187 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/.tmp/info/b70693bd0abe423ea67c351b1ddb292f is 153, key is TestHBaseWalOnEC,,1733958359234.1ea9365960e4551483f051ffe209e83c./info:regioninfo/1733958359667/Put/seqid=0 2024-12-11T23:06:01,191 WARN [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,191 WARN [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,195 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268361409_22 at /127.0.0.1:58218 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58218 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T23:06:01,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-11T23:06:01,202 WARN [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:06:01,203 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/.tmp/info/b70693bd0abe423ea67c351b1ddb292f 2024-12-11T23:06:01,224 INFO [regionserver/d462fa545078:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T23:06:01,224 INFO [regionserver/d462fa545078:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T23:06:01,227 INFO [regionserver/d462fa545078:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T23:06:01,227 INFO [regionserver/d462fa545078:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T23:06:01,228 INFO [regionserver/d462fa545078:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:01,232 INFO [regionserver/d462fa545078:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:01,232 INFO [regionserver/d462fa545078:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:01,234 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/.tmp/ns/82e3838e662848c5b4155a82bcda7935 is 43, key is default/ns:d/1733958358993/Put/seqid=0 2024-12-11T23:06:01,237 WARN [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,237 WARN [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,240 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268361409_22 at /127.0.0.1:58232 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58232 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:06:01,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-11T23:06:01,245 WARN [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:06:01,245 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/.tmp/ns/82e3838e662848c5b4155a82bcda7935 2024-12-11T23:06:01,273 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/.tmp/table/a616e30b7f6348b89e58d65e56e93ef1 is 52, key is TestHBaseWalOnEC/table:state/1733958359682/Put/seqid=0 2024-12-11T23:06:01,276 WARN [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,276 WARN [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T23:06:01,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:01,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41227-0x1001720cf410001, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:01,277 INFO [RS:0;d462fa545078:41227 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:06:01,278 INFO [RS:0;d462fa545078:41227 {}] regionserver.HRegionServer(1031): Exiting; stopping=d462fa545078,41227,1733958356589; zookeeper connection closed. 2024-12-11T23:06:01,278 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7b93b6b4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7b93b6b4 2024-12-11T23:06:01,280 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268361409_22 at /127.0.0.1:58254 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58254 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:06:01,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-11T23:06:01,284 WARN [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T23:06:01,284 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/.tmp/table/a616e30b7f6348b89e58d65e56e93ef1 2024-12-11T23:06:01,297 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/.tmp/info/b70693bd0abe423ea67c351b1ddb292f as hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/info/b70693bd0abe423ea67c351b1ddb292f 2024-12-11T23:06:01,308 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/info/b70693bd0abe423ea67c351b1ddb292f, entries=10, sequenceid=11, filesize=6.5 K 2024-12-11T23:06:01,310 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/.tmp/ns/82e3838e662848c5b4155a82bcda7935 as hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/ns/82e3838e662848c5b4155a82bcda7935 2024-12-11T23:06:01,320 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/ns/82e3838e662848c5b4155a82bcda7935, entries=2, sequenceid=11, filesize=5.0 K 2024-12-11T23:06:01,322 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/.tmp/table/a616e30b7f6348b89e58d65e56e93ef1 as hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/table/a616e30b7f6348b89e58d65e56e93ef1 2024-12-11T23:06:01,332 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/table/a616e30b7f6348b89e58d65e56e93ef1, entries=2, sequenceid=11, filesize=5.1 K 2024-12-11T23:06:01,334 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 189ms, sequenceid=11, compaction requested=false 2024-12-11T23:06:01,334 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T23:06:01,343 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, 
maxSeqId=1 2024-12-11T23:06:01,344 INFO [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(976): stopping server d462fa545078,39185,1733958356715; all regions closed. 2024-12-11T23:06:01,344 DEBUG [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T23:06:01,344 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T23:06:01,344 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T23:06:01,344 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733958361143Running coprocessor pre-close hooks at 1733958361143Disabling compacts and flushes for region at 1733958361143Disabling writes for close at 1733958361144 (+1 ms)Obtaining lock to block concurrent updates at 1733958361144Preparing flush snapshotting stores in 1588230740 at 1733958361144Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733958361145 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733958361148 (+3 ms)Flushing 1588230740/info: creating writer at 1733958361149 (+1 ms)Flushing 1588230740/info: appending metadata at 1733958361184 (+35 ms)Flushing 1588230740/info: closing flushed file at 1733958361184Flushing 1588230740/ns: creating writer at 1733958361213 (+29 ms)Flushing 1588230740/ns: appending metadata at 1733958361233 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1733958361233Flushing 1588230740/table: creating writer at 1733958361255 (+22 ms)Flushing 1588230740/table: appending metadata at 1733958361272 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733958361272Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7884a8c8: reopening flushed file at 1733958361295 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@186954be: reopening flushed file at 1733958361309 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@130885b0: reopening flushed file at 1733958361320 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 189ms, sequenceid=11, compaction requested=false at 1733958361334 (+14 ms)Writing region close event to WAL at 1733958361336 (+2 ms)Running coprocessor post-close hooks at 1733958361344 (+8 ms)Closed at 1733958361344 2024-12-11T23:06:01,344 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T23:06:01,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_1073741827_1017 (size=1298) 2024-12-11T23:06:01,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741827_1017 (size=1298) 2024-12-11T23:06:01,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741827_1017 (size=1298) 2024-12-11T23:06:01,351 DEBUG [RS:2;d462fa545078:39185 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to 
/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/oldWALs 2024-12-11T23:06:01,351 INFO [RS:2;d462fa545078:39185 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL d462fa545078%2C39185%2C1733958356715:(num 1733958358386) 2024-12-11T23:06:01,351 DEBUG [RS:2;d462fa545078:39185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:01,351 INFO [RS:2;d462fa545078:39185 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:01,351 INFO [RS:2;d462fa545078:39185 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:06:01,351 INFO [RS:2;d462fa545078:39185 {}] hbase.ChoreService(370): Chore service for: regionserver/d462fa545078:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T23:06:01,351 INFO [RS:2;d462fa545078:39185 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T23:06:01,351 INFO [regionserver/d462fa545078:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T23:06:01,351 INFO [RS:2;d462fa545078:39185 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T23:06:01,352 INFO [RS:2;d462fa545078:39185 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T23:06:01,352 INFO [RS:2;d462fa545078:39185 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:06:01,352 INFO [RS:2;d462fa545078:39185 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39185 2024-12-11T23:06:01,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:06:01,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d462fa545078,39185,1733958356715 2024-12-11T23:06:01,365 INFO [RS:2;d462fa545078:39185 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:06:01,365 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$377/0x00007f263c8f8290@66717d1e rejected from java.util.concurrent.ThreadPoolExecutor@2373efb4[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-11T23:06:01,377 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d462fa545078,39185,1733958356715] 2024-12-11T23:06:01,387 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d462fa545078,39185,1733958356715 already deleted, retry=false 2024-12-11T23:06:01,387 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d462fa545078,39185,1733958356715 expired; onlineServers=1 2024-12-11T23:06:01,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:01,477 INFO [RS:2;d462fa545078:39185 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:06:01,477 INFO [RS:2;d462fa545078:39185 {}] regionserver.HRegionServer(1031): Exiting; stopping=d462fa545078,39185,1733958356715; zookeeper connection closed. 2024-12-11T23:06:01,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39185-0x1001720cf410003, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:01,478 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@18669ef0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@18669ef0 2024-12-11T23:06:01,544 INFO [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(976): stopping server d462fa545078,45367,1733958356676; all regions closed. 
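[editor's note] The RejectedExecutionException logged above comes from a ZooKeeper watcher event being delivered after the ZKWatcher's internal executor has already terminated during shutdown (note the pool state "Terminated, pool size = 0" in the message). A minimal, self-contained illustration of that failure mode, using only java.util.concurrent rather than any HBase code, looks like this:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;

    public class RejectedAfterShutdown {
      public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.shutdown();  // executor drains and moves toward the Terminated state
        try {
          // Comparable to a ZooKeeper watch firing after cluster teardown has begun.
          pool.execute(() -> System.out.println("late event"));
        } catch (RejectedExecutionException e) {
          // The default AbortPolicy rejects work submitted after shutdown, which is
          // exactly what the ZKWatcher event thread reports above during teardown.
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }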
2024-12-11T23:06:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741829_1019 (size=2751) 2024-12-11T23:06:01,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_1073741829_1019 (size=2751) 2024-12-11T23:06:01,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741829_1019 (size=2751) 2024-12-11T23:06:01,556 DEBUG [RS:1;d462fa545078:45367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/oldWALs 2024-12-11T23:06:01,556 INFO [RS:1;d462fa545078:45367 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL d462fa545078%2C45367%2C1733958356676.meta:.meta(num 1733958358804) 2024-12-11T23:06:01,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741826_1016 (size=93) 2024-12-11T23:06:01,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741826_1016 (size=93) 2024-12-11T23:06:01,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_1073741826_1016 (size=93) 2024-12-11T23:06:01,562 DEBUG [RS:1;d462fa545078:45367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/oldWALs 2024-12-11T23:06:01,562 INFO [RS:1;d462fa545078:45367 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL d462fa545078%2C45367%2C1733958356676:(num 1733958358386) 2024-12-11T23:06:01,562 DEBUG [RS:1;d462fa545078:45367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:01,562 INFO [RS:1;d462fa545078:45367 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:01,562 INFO [RS:1;d462fa545078:45367 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:06:01,563 INFO [RS:1;d462fa545078:45367 {}] hbase.ChoreService(370): Chore service for: regionserver/d462fa545078:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T23:06:01,563 INFO [RS:1;d462fa545078:45367 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:06:01,563 INFO [regionserver/d462fa545078:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T23:06:01,563 INFO [RS:1;d462fa545078:45367 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45367 2024-12-11T23:06:01,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d462fa545078,45367,1733958356676 2024-12-11T23:06:01,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:06:01,575 INFO [RS:1;d462fa545078:45367 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:06:01,587 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d462fa545078,45367,1733958356676] 2024-12-11T23:06:01,597 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d462fa545078,45367,1733958356676 already deleted, retry=false 2024-12-11T23:06:01,598 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d462fa545078,45367,1733958356676 expired; onlineServers=0 2024-12-11T23:06:01,598 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd462fa545078,32963,1733958355833' ***** 2024-12-11T23:06:01,598 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T23:06:01,598 INFO [M:0;d462fa545078:32963 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:06:01,598 INFO [M:0;d462fa545078:32963 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:06:01,598 DEBUG [M:0;d462fa545078:32963 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T23:06:01,598 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-11T23:06:01,599 DEBUG [M:0;d462fa545078:32963 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T23:06:01,599 DEBUG [master/d462fa545078:0:becomeActiveMaster-HFileCleaner.small.0-1733958357985 {}] cleaner.HFileCleaner(306): Exit Thread[master/d462fa545078:0:becomeActiveMaster-HFileCleaner.small.0-1733958357985,5,FailOnTimeoutGroup] 2024-12-11T23:06:01,599 DEBUG [master/d462fa545078:0:becomeActiveMaster-HFileCleaner.large.0-1733958357983 {}] cleaner.HFileCleaner(306): Exit Thread[master/d462fa545078:0:becomeActiveMaster-HFileCleaner.large.0-1733958357983,5,FailOnTimeoutGroup] 2024-12-11T23:06:01,599 INFO [M:0;d462fa545078:32963 {}] hbase.ChoreService(370): Chore service for: master/d462fa545078:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T23:06:01,599 INFO [M:0;d462fa545078:32963 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:06:01,599 DEBUG [M:0;d462fa545078:32963 {}] master.HMaster(1795): Stopping service threads 2024-12-11T23:06:01,599 INFO [M:0;d462fa545078:32963 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T23:06:01,600 INFO [M:0;d462fa545078:32963 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T23:06:01,601 INFO [M:0;d462fa545078:32963 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T23:06:01,601 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-11T23:06:01,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T23:06:01,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:01,609 DEBUG [M:0;d462fa545078:32963 {}] zookeeper.ZKUtil(347): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T23:06:01,609 WARN [M:0;d462fa545078:32963 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T23:06:01,610 INFO [M:0;d462fa545078:32963 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/.lastflushedseqids 2024-12-11T23:06:01,621 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,621 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T23:06:01,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:45586 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45586 dst: /127.0.0.1:39435 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:06:01,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-11T23:06:01,629 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:06:01,629 INFO [M:0;d462fa545078:32963 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T23:06:01,629 INFO [M:0;d462fa545078:32963 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T23:06:01,629 DEBUG [M:0;d462fa545078:32963 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T23:06:01,629 INFO [M:0;d462fa545078:32963 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:06:01,629 DEBUG [M:0;d462fa545078:32963 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:06:01,629 DEBUG [M:0;d462fa545078:32963 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T23:06:01,629 DEBUG [M:0;d462fa545078:32963 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T23:06:01,630 INFO [M:0;d462fa545078:32963 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-11T23:06:01,647 DEBUG [M:0;d462fa545078:32963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c97fa55d392c47c6b66d932ac9a04ac0 is 82, key is hbase:meta,,1/info:regioninfo/1733958358885/Put/seqid=0 2024-12-11T23:06:01,649 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,649 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:58278 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58278 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:06:01,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-11T23:06:01,656 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T23:06:01,656 INFO [M:0;d462fa545078:32963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c97fa55d392c47c6b66d932ac9a04ac0 2024-12-11T23:06:01,677 DEBUG [M:0;d462fa545078:32963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e7a7374ffc28490488cbeefd30c2aadd is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733958359688/Put/seqid=0 2024-12-11T23:06:01,679 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,679 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,681 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:58292 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58292 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:06:01,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_-9223372036854775552_1037 (size=6439) 2024-12-11T23:06:01,687 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T23:06:01,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:01,687 INFO [RS:1;d462fa545078:45367 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:06:01,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45367-0x1001720cf410002, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:01,687 INFO [RS:1;d462fa545078:45367 {}] regionserver.HRegionServer(1031): Exiting; stopping=d462fa545078,45367,1733958356676; zookeeper connection closed. 2024-12-11T23:06:01,687 INFO [M:0;d462fa545078:32963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e7a7374ffc28490488cbeefd30c2aadd 2024-12-11T23:06:01,692 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@c4ce9c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@c4ce9c 2024-12-11T23:06:01,693 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T23:06:01,708 DEBUG [M:0;d462fa545078:32963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b7b4c7c2a8fd45809d72c60758515330 is 69, key is d462fa545078,39185,1733958356715/rs:state/1733958358078/Put/seqid=0 2024-12-11T23:06:01,709 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,710 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T23:06:01,712 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972760579_22 at /127.0.0.1:45594 [Receiving block BP-109485183-172.17.0.2-1733958351034:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:39435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45594 dst: /127.0.0.1:39435 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:06:01,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-11T23:06:01,716 WARN [M:0;d462fa545078:32963 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T23:06:01,716 INFO [M:0;d462fa545078:32963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b7b4c7c2a8fd45809d72c60758515330 2024-12-11T23:06:01,725 DEBUG [M:0;d462fa545078:32963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c97fa55d392c47c6b66d932ac9a04ac0 as hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c97fa55d392c47c6b66d932ac9a04ac0 2024-12-11T23:06:01,732 INFO [M:0;d462fa545078:32963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c97fa55d392c47c6b66d932ac9a04ac0, entries=8, sequenceid=72, filesize=5.5 K 2024-12-11T23:06:01,734 DEBUG [M:0;d462fa545078:32963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e7a7374ffc28490488cbeefd30c2aadd as hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e7a7374ffc28490488cbeefd30c2aadd 2024-12-11T23:06:01,740 INFO [M:0;d462fa545078:32963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e7a7374ffc28490488cbeefd30c2aadd, entries=8, sequenceid=72, filesize=6.3 K 2024-12-11T23:06:01,742 DEBUG [M:0;d462fa545078:32963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b7b4c7c2a8fd45809d72c60758515330 as hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b7b4c7c2a8fd45809d72c60758515330 2024-12-11T23:06:01,749 INFO [M:0;d462fa545078:32963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b7b4c7c2a8fd45809d72c60758515330, entries=3, sequenceid=72, filesize=5.2 K 2024-12-11T23:06:01,751 INFO [M:0;d462fa545078:32963 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=72, compaction requested=false 2024-12-11T23:06:01,752 INFO [M:0;d462fa545078:32963 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:06:01,752 DEBUG [M:0;d462fa545078:32963 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733958361629Disabling compacts and flushes for region at 1733958361629Disabling writes for close at 1733958361629Obtaining lock to block concurrent updates at 1733958361630 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733958361630Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733958361630Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733958361631 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733958361631Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733958361647 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733958361647Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733958361662 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733958361676 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733958361676Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733958361695 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733958361707 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733958361708 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@558ea4b9: reopening flushed file at 1733958361723 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@571a425c: reopening flushed file at 1733958361732 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c11c0b4: reopening flushed file at 1733958361741 (+9 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=72, compaction requested=false at 1733958361751 (+10 ms)Writing region close event to WAL at 1733958361752 (+1 ms)Closed at 1733958361752 2024-12-11T23:06:01,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39927 is added to blk_1073741825_1011 (size=32674) 2024-12-11T23:06:01,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741825_1011 (size=32674) 2024-12-11T23:06:01,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39435 is added to blk_1073741825_1011 (size=32674) 2024-12-11T23:06:01,756 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T23:06:01,756 INFO [M:0;d462fa545078:32963 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-11T23:06:01,756 INFO [M:0;d462fa545078:32963 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32963 2024-12-11T23:06:01,756 INFO [M:0;d462fa545078:32963 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:06:01,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:01,866 INFO [M:0;d462fa545078:32963 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:06:01,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32963-0x1001720cf410000, quorum=127.0.0.1:65191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:01,897 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@793c26cb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:06:01,899 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e0ab7c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:06:01,899 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:06:01,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36ef30b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:06:01,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f4b982a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir/,STOPPED} 2024-12-11T23:06:01,901 WARN [BP-109485183-172.17.0.2-1733958351034 heartbeating to localhost/127.0.0.1:39211 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T23:06:01,901 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T23:06:01,902 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T23:06:01,902 WARN [BP-109485183-172.17.0.2-1733958351034 heartbeating to localhost/127.0.0.1:39211 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-109485183-172.17.0.2-1733958351034 (Datanode Uuid 47107289-7e50-4755-a3e0-4cc78a0f053d) service to localhost/127.0.0.1:39211 2024-12-11T23:06:01,903 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data5/current/BP-109485183-172.17.0.2-1733958351034 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:01,903 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data6/current/BP-109485183-172.17.0.2-1733958351034 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:01,903 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T23:06:01,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43fe7c8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:06:01,905 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6fecef94{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:06:01,905 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:06:01,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e752fdd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:06:01,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f04a93b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir/,STOPPED} 2024-12-11T23:06:01,907 WARN [BP-109485183-172.17.0.2-1733958351034 heartbeating to localhost/127.0.0.1:39211 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T23:06:01,907 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T23:06:01,907 WARN [BP-109485183-172.17.0.2-1733958351034 heartbeating to localhost/127.0.0.1:39211 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-109485183-172.17.0.2-1733958351034 (Datanode Uuid 6f23d976-b72f-47d8-9209-bd40fcc68143) service to localhost/127.0.0.1:39211 2024-12-11T23:06:01,907 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T23:06:01,907 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data3/current/BP-109485183-172.17.0.2-1733958351034 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:01,908 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data4/current/BP-109485183-172.17.0.2-1733958351034 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:01,908 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T23:06:01,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@103787ab{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:06:01,914 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23a1a692{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:06:01,914 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:06:01,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79f7513c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:06:01,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d15633c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir/,STOPPED} 2024-12-11T23:06:01,915 WARN [BP-109485183-172.17.0.2-1733958351034 heartbeating to localhost/127.0.0.1:39211 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T23:06:01,915 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T23:06:01,915 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T23:06:01,915 WARN [BP-109485183-172.17.0.2-1733958351034 heartbeating to localhost/127.0.0.1:39211 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-109485183-172.17.0.2-1733958351034 (Datanode Uuid c9441099-1121-4f89-bff3-97b0d17d22c3) service to localhost/127.0.0.1:39211 2024-12-11T23:06:01,916 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data1/current/BP-109485183-172.17.0.2-1733958351034 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:01,916 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/cluster_ac026108-a70b-bb12-f30d-c193185de63a/data/data2/current/BP-109485183-172.17.0.2-1733958351034 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:01,916 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T23:06:01,926 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63603efd{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T23:06:01,927 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f607d40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:06:01,927 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:06:01,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e67f021{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:06:01,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77f7f078{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir/,STOPPED} 2024-12-11T23:06:01,936 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-11T23:06:01,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-11T23:06:01,975 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=93 (was 162), OpenFileDescriptor=449 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=362 (was 350) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=9494 (was 9806) 2024-12-11T23:06:01,980 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=93, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=362, ProcessCount=11, AvailableMemoryMB=9494 2024-12-11T23:06:01,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T23:06:01,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.log.dir so I do NOT create it in target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8 2024-12-11T23:06:01,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9af44ce3-2160-5c53-e56f-ca5e10061e3d/hadoop.tmp.dir so I do NOT create it in target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8 2024-12-11T23:06:01,980 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8, deleteOnExit=true 2024-12-11T23:06:01,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-11T23:06:01,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/test.cache.data in system properties and HBase conf 2024-12-11T23:06:01,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T23:06:01,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.log.dir in system properties and HBase conf 2024-12-11T23:06:01,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T23:06:01,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T23:06:01,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T23:06:01,981 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T23:06:01,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/nfs.dump.dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/java.io.tmpdir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T23:06:01,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T23:06:02,321 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:06:02,325 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T23:06:02,327 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T23:06:02,327 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T23:06:02,327 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T23:06:02,327 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:06:02,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43c9e816{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.log.dir/,AVAILABLE} 2024-12-11T23:06:02,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce053d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T23:06:02,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3306e0d7{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/java.io.tmpdir/jetty-localhost-34397-hadoop-hdfs-3_4_1-tests_jar-_-any-8258140652134767855/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T23:06:02,419 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@28dc8530{HTTP/1.1, (http/1.1)}{localhost:34397} 2024-12-11T23:06:02,419 INFO [Time-limited test {}] server.Server(415): Started @13148ms 2024-12-11T23:06:02,785 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:06:02,788 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T23:06:02,790 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T23:06:02,790 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T23:06:02,790 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T23:06:02,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f37eae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.log.dir/,AVAILABLE} 2024-12-11T23:06:02,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c42eb95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T23:06:02,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@798a0a47{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/java.io.tmpdir/jetty-localhost-41301-hadoop-hdfs-3_4_1-tests_jar-_-any-1310705819543080680/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:06:02,881 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@10e8516e{HTTP/1.1, (http/1.1)}{localhost:41301} 2024-12-11T23:06:02,881 INFO [Time-limited test {}] server.Server(415): Started @13611ms 2024-12-11T23:06:02,882 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T23:06:02,916 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:06:02,918 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T23:06:02,920 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T23:06:02,920 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T23:06:02,920 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T23:06:02,921 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44b888f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.log.dir/,AVAILABLE} 2024-12-11T23:06:02,921 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43ceb76a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T23:06:03,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@577871b5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/java.io.tmpdir/jetty-localhost-45513-hadoop-hdfs-3_4_1-tests_jar-_-any-13246795747435319036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:06:03,011 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@211473f7{HTTP/1.1, (http/1.1)}{localhost:45513} 2024-12-11T23:06:03,011 INFO [Time-limited test {}] server.Server(415): Started @13741ms 2024-12-11T23:06:03,012 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T23:06:03,039 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T23:06:03,041 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T23:06:03,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T23:06:03,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T23:06:03,042 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T23:06:03,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eb20002{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.log.dir/,AVAILABLE} 2024-12-11T23:06:03,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5422797{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T23:06:03,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b49a11e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/java.io.tmpdir/jetty-localhost-37489-hadoop-hdfs-3_4_1-tests_jar-_-any-7209990192720931715/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:06:03,140 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@465cf8e8{HTTP/1.1, (http/1.1)}{localhost:37489} 2024-12-11T23:06:03,140 INFO [Time-limited test {}] server.Server(415): Started @13869ms 2024-12-11T23:06:03,141 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T23:06:04,266 WARN [Thread-569 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data1/current/BP-529397267-172.17.0.2-1733958362006/current, will proceed with Du for space computation calculation, 2024-12-11T23:06:04,266 WARN [Thread-570 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data2/current/BP-529397267-172.17.0.2-1733958362006/current, will proceed with Du for space computation calculation, 2024-12-11T23:06:04,284 WARN [Thread-509 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T23:06:04,287 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4661361aacb89c65 with lease ID 0x803ed7c28f859954: Processing first storage report for DS-0a442b45-e769-4dcf-8403-3cefcb9ac63b from datanode DatanodeRegistration(127.0.0.1:36109, datanodeUuid=7b9ee8bb-91c3-43f3-968e-1d01661ae6fe, infoPort=33039, infoSecurePort=0, ipcPort=35943, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006) 2024-12-11T23:06:04,287 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4661361aacb89c65 with lease ID 0x803ed7c28f859954: from storage DS-0a442b45-e769-4dcf-8403-3cefcb9ac63b node DatanodeRegistration(127.0.0.1:36109, datanodeUuid=7b9ee8bb-91c3-43f3-968e-1d01661ae6fe, infoPort=33039, infoSecurePort=0, ipcPort=35943, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T23:06:04,287 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4661361aacb89c65 with lease ID 0x803ed7c28f859954: Processing first storage report for DS-48836aa2-172f-4dba-9dad-26e65552fd73 from datanode DatanodeRegistration(127.0.0.1:36109, datanodeUuid=7b9ee8bb-91c3-43f3-968e-1d01661ae6fe, infoPort=33039, infoSecurePort=0, ipcPort=35943, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006) 2024-12-11T23:06:04,287 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4661361aacb89c65 with lease ID 0x803ed7c28f859954: from storage DS-48836aa2-172f-4dba-9dad-26e65552fd73 node DatanodeRegistration(127.0.0.1:36109, datanodeUuid=7b9ee8bb-91c3-43f3-968e-1d01661ae6fe, infoPort=33039, infoSecurePort=0, ipcPort=35943, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T23:06:04,349 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-11T23:06:04,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T23:06:04,393 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T23:06:04,438 WARN [Thread-581 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data3/current/BP-529397267-172.17.0.2-1733958362006/current, will proceed with Du for space computation calculation, 2024-12-11T23:06:04,438 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data4/current/BP-529397267-172.17.0.2-1733958362006/current, will proceed with Du for space computation calculation, 2024-12-11T23:06:04,455 WARN [Thread-532 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T23:06:04,458 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ce6d0b79cf5c036 with lease ID 0x803ed7c28f859955: Processing first storage report for DS-7fdd639d-b229-4406-9c5f-aec632eb03ac from datanode DatanodeRegistration(127.0.0.1:45409, datanodeUuid=2eac4407-729b-4c54-b584-649af68dbefd, infoPort=41685, infoSecurePort=0, ipcPort=37011, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006) 2024-12-11T23:06:04,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ce6d0b79cf5c036 with lease ID 0x803ed7c28f859955: from storage DS-7fdd639d-b229-4406-9c5f-aec632eb03ac node DatanodeRegistration(127.0.0.1:45409, datanodeUuid=2eac4407-729b-4c54-b584-649af68dbefd, infoPort=41685, infoSecurePort=0, ipcPort=37011, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T23:06:04,458 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ce6d0b79cf5c036 with lease ID 0x803ed7c28f859955: Processing first storage report for DS-25032359-27d6-4399-9d27-446589ad31eb from datanode DatanodeRegistration(127.0.0.1:45409, datanodeUuid=2eac4407-729b-4c54-b584-649af68dbefd, infoPort=41685, infoSecurePort=0, ipcPort=37011, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006) 2024-12-11T23:06:04,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ce6d0b79cf5c036 with lease ID 0x803ed7c28f859955: from storage DS-25032359-27d6-4399-9d27-446589ad31eb node DatanodeRegistration(127.0.0.1:45409, datanodeUuid=2eac4407-729b-4c54-b584-649af68dbefd, infoPort=41685, infoSecurePort=0, ipcPort=37011, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T23:06:04,489 WARN [Thread-592 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data5/current/BP-529397267-172.17.0.2-1733958362006/current, will proceed with Du for space computation calculation, 2024-12-11T23:06:04,489 WARN [Thread-593 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data6/current/BP-529397267-172.17.0.2-1733958362006/current, will proceed with Du for space computation calculation, 2024-12-11T23:06:04,506 WARN [Thread-554 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T23:06:04,508 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9465865cd3e78ca5 with lease ID 0x803ed7c28f859956: Processing first storage report for DS-8358ff98-1fba-4eb5-ad1a-15827423ad5b from datanode DatanodeRegistration(127.0.0.1:37845, datanodeUuid=0400299d-6aef-4ca3-bb0b-086f4671412c, infoPort=39665, infoSecurePort=0, ipcPort=32843, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006) 2024-12-11T23:06:04,508 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9465865cd3e78ca5 with lease ID 0x803ed7c28f859956: from storage DS-8358ff98-1fba-4eb5-ad1a-15827423ad5b node DatanodeRegistration(127.0.0.1:37845, datanodeUuid=0400299d-6aef-4ca3-bb0b-086f4671412c, infoPort=39665, infoSecurePort=0, ipcPort=32843, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T23:06:04,508 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9465865cd3e78ca5 with lease ID 0x803ed7c28f859956: Processing first storage report for DS-c13aad25-0a4c-4ca8-806d-4dd2b9fcaec9 from datanode DatanodeRegistration(127.0.0.1:37845, datanodeUuid=0400299d-6aef-4ca3-bb0b-086f4671412c, infoPort=39665, infoSecurePort=0, ipcPort=32843, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006) 2024-12-11T23:06:04,508 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9465865cd3e78ca5 with lease ID 0x803ed7c28f859956: from storage DS-c13aad25-0a4c-4ca8-806d-4dd2b9fcaec9 node DatanodeRegistration(127.0.0.1:37845, datanodeUuid=0400299d-6aef-4ca3-bb0b-086f4671412c, infoPort=39665, infoSecurePort=0, ipcPort=32843, storageInfo=lv=-57;cid=testClusterID;nsid=1689124416;c=1733958362006), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T23:06:04,581 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8 2024-12-11T23:06:04,584 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/zookeeper_0, clientPort=49347, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T23:06:04,585 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49347 2024-12-11T23:06:04,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,588 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741825_1001 (size=7) 2024-12-11T23:06:04,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741825_1001 (size=7) 2024-12-11T23:06:04,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741825_1001 (size=7) 2024-12-11T23:06:04,603 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52 with version=8 2024-12-11T23:06:04,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39211/user/jenkins/test-data/1c8ad93b-1232-3903-bcb5-74531971e323/hbase-staging 2024-12-11T23:06:04,606 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d462fa545078:0 server-side Connection retries=45 2024-12-11T23:06:04,606 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,606 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,606 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T23:06:04,606 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,606 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T23:06:04,606 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T23:06:04,606 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T23:06:04,607 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41025 2024-12-11T23:06:04,608 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41025 connecting to ZooKeeper ensemble=127.0.0.1:49347 2024-12-11T23:06:04,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:410250x0, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T23:06:04,658 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41025-0x1001720f46e0000 connected 2024-12-11T23:06:04,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,751 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:04,755 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52, hbase.cluster.distributed=false 2024-12-11T23:06:04,758 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T23:06:04,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41025 2024-12-11T23:06:04,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41025 2024-12-11T23:06:04,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41025 2024-12-11T23:06:04,759 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41025 2024-12-11T23:06:04,759 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41025 2024-12-11T23:06:04,772 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d462fa545078:0 server-side Connection retries=45 2024-12-11T23:06:04,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,772 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T23:06:04,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,773 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T23:06:04,773 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T23:06:04,773 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T23:06:04,773 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34161 2024-12-11T23:06:04,775 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34161 connecting to ZooKeeper ensemble=127.0.0.1:49347 2024-12-11T23:06:04,776 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,777 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:341610x0, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T23:06:04,787 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:04,787 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34161-0x1001720f46e0001 connected 2024-12-11T23:06:04,788 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T23:06:04,788 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T23:06:04,789 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T23:06:04,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T23:06:04,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34161 2024-12-11T23:06:04,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34161 2024-12-11T23:06:04,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34161 2024-12-11T23:06:04,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34161 2024-12-11T23:06:04,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34161 2024-12-11T23:06:04,809 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d462fa545078:0 server-side Connection retries=45 2024-12-11T23:06:04,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,809 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T23:06:04,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T23:06:04,809 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T23:06:04,810 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T23:06:04,810 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37223 2024-12-11T23:06:04,811 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37223 connecting to ZooKeeper ensemble=127.0.0.1:49347 2024-12-11T23:06:04,812 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,813 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:372230x0, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T23:06:04,828 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37223-0x1001720f46e0002 connected 2024-12-11T23:06:04,828 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:04,829 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T23:06:04,830 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T23:06:04,830 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T23:06:04,832 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T23:06:04,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37223 2024-12-11T23:06:04,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37223 2024-12-11T23:06:04,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37223 2024-12-11T23:06:04,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37223 2024-12-11T23:06:04,834 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37223 2024-12-11T23:06:04,851 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d462fa545078:0 server-side Connection retries=45 2024-12-11T23:06:04,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,851 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T23:06:04,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T23:06:04,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T23:06:04,851 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T23:06:04,851 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T23:06:04,852 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45549 2024-12-11T23:06:04,853 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45549 connecting to ZooKeeper ensemble=127.0.0.1:49347 2024-12-11T23:06:04,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455490x0, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T23:06:04,870 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:04,870 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45549-0x1001720f46e0003 connected 2024-12-11T23:06:04,871 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T23:06:04,871 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T23:06:04,872 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T23:06:04,873 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T23:06:04,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45549 2024-12-11T23:06:04,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45549 2024-12-11T23:06:04,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45549 2024-12-11T23:06:04,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45549 2024-12-11T23:06:04,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45549 2024-12-11T23:06:04,888 DEBUG [M:0;d462fa545078:41025 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d462fa545078:41025 2024-12-11T23:06:04,888 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d462fa545078,41025,1733958364605 2024-12-11T23:06:04,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:06:04,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:06:04,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:06:04,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:06:04,902 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d462fa545078,41025,1733958364605 2024-12-11T23:06:04,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T23:06:04,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T23:06:04,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:04,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T23:06:04,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:04,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:04,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:04,914 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T23:06:04,915 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d462fa545078,41025,1733958364605 from backup master directory 2024-12-11T23:06:04,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:06:04,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d462fa545078,41025,1733958364605 2024-12-11T23:06:04,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:06:04,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:06:04,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T23:06:04,924 WARN [master/d462fa545078:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T23:06:04,924 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d462fa545078,41025,1733958364605 2024-12-11T23:06:04,932 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/hbase.id] with ID: 51a34bd0-e447-4211-be77-65154a128695 2024-12-11T23:06:04,932 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/.tmp/hbase.id 2024-12-11T23:06:04,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741826_1002 (size=42) 2024-12-11T23:06:04,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741826_1002 (size=42) 2024-12-11T23:06:04,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741826_1002 (size=42) 2024-12-11T23:06:04,945 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/.tmp/hbase.id]:[hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/hbase.id] 2024-12-11T23:06:04,961 INFO [master/d462fa545078:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T23:06:04,961 INFO [master/d462fa545078:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T23:06:04,962 INFO [master/d462fa545078:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-11T23:06:04,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:04,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:04,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:04,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:04,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741827_1003 (size=196) 2024-12-11T23:06:04,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741827_1003 (size=196) 2024-12-11T23:06:04,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741827_1003 (size=196) 2024-12-11T23:06:04,988 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:06:04,988 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T23:06:04,989 INFO [master/d462fa545078:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T23:06:05,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is 
added to blk_1073741828_1004 (size=1189) 2024-12-11T23:06:05,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741828_1004 (size=1189) 2024-12-11T23:06:05,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741828_1004 (size=1189) 2024-12-11T23:06:05,002 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store 2024-12-11T23:06:05,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741829_1005 (size=34) 2024-12-11T23:06:05,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741829_1005 (size=34) 2024-12-11T23:06:05,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741829_1005 (size=34) 2024-12-11T23:06:05,012 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:06:05,012 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T23:06:05,012 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:06:05,012 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T23:06:05,012 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T23:06:05,012 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:06:05,012 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:06:05,012 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733958365012Disabling compacts and flushes for region at 1733958365012Disabling writes for close at 1733958365012Writing region close event to WAL at 1733958365012Closed at 1733958365012 2024-12-11T23:06:05,013 WARN [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/.initializing 2024-12-11T23:06:05,013 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/WALs/d462fa545078,41025,1733958364605 2024-12-11T23:06:05,018 INFO [master/d462fa545078:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C41025%2C1733958364605, suffix=, logDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/WALs/d462fa545078,41025,1733958364605, archiveDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/oldWALs, maxLogs=10 2024-12-11T23:06:05,019 INFO [master/d462fa545078:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d462fa545078%2C41025%2C1733958364605.1733958365018 2024-12-11T23:06:05,028 INFO [master/d462fa545078:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/WALs/d462fa545078,41025,1733958364605/d462fa545078%2C41025%2C1733958364605.1733958365018 2024-12-11T23:06:05,030 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41685:41685),(127.0.0.1/127.0.0.1:39665:39665),(127.0.0.1/127.0.0.1:33039:33039)] 2024-12-11T23:06:05,032 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T23:06:05,033 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:06:05,033 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,033 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T23:06:05,036 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:06:05,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T23:06:05,039 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:06:05,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T23:06:05,042 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:06:05,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T23:06:05,046 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:06:05,046 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,047 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,047 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,049 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,049 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,049 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T23:06:05,051 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T23:06:05,054 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:06:05,054 INFO [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65722929, jitterRate=-0.02065204083919525}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T23:06:05,056 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733958365033Initializing all the Stores at 1733958365034 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958365034Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958365034Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958365034Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958365034Cleaning up temporary data from old regions at 1733958365049 (+15 ms)Region opened successfully at 1733958365056 (+7 ms) 2024-12-11T23:06:05,056 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T23:06:05,061 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bd30b5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d462fa545078/172.17.0.2:0 2024-12-11T23:06:05,062 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T23:06:05,062 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T23:06:05,062 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T23:06:05,062 INFO [master/d462fa545078:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T23:06:05,063 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-11T23:06:05,064 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-11T23:06:05,064 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T23:06:05,066 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-11T23:06:05,068 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T23:06:05,080 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T23:06:05,081 INFO [master/d462fa545078:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T23:06:05,082 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T23:06:05,092 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T23:06:05,093 INFO [master/d462fa545078:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T23:06:05,094 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T23:06:05,102 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T23:06:05,104 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T23:06:05,113 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T23:06:05,115 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T23:06:05,123 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T23:06:05,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:05,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:05,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:05,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-11T23:06:05,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,135 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d462fa545078,41025,1733958364605, sessionid=0x1001720f46e0000, setting cluster-up flag (Was=false) 2024-12-11T23:06:05,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,187 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T23:06:05,188 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d462fa545078,41025,1733958364605 2024-12-11T23:06:05,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,240 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T23:06:05,244 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d462fa545078,41025,1733958364605 2024-12-11T23:06:05,248 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T23:06:05,252 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T23:06:05,252 INFO [master/d462fa545078:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T23:06:05,253 INFO [master/d462fa545078:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-11T23:06:05,253 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d462fa545078,41025,1733958364605 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T23:06:05,255 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d462fa545078:0, corePoolSize=5, maxPoolSize=5 2024-12-11T23:06:05,255 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d462fa545078:0, corePoolSize=5, maxPoolSize=5 2024-12-11T23:06:05,255 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d462fa545078:0, corePoolSize=5, maxPoolSize=5 2024-12-11T23:06:05,255 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d462fa545078:0, corePoolSize=5, maxPoolSize=5 2024-12-11T23:06:05,255 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d462fa545078:0, corePoolSize=10, maxPoolSize=10 2024-12-11T23:06:05,255 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,255 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d462fa545078:0, corePoolSize=2, maxPoolSize=2 2024-12-11T23:06:05,255 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,257 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733958395257 2024-12-11T23:06:05,257 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T23:06:05,257 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T23:06:05,257 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T23:06:05,257 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T23:06:05,257 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T23:06:05,257 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T23:06:05,258 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,258 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T23:06:05,258 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T23:06:05,258 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T23:06:05,258 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T23:06:05,258 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T23:06:05,259 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T23:06:05,259 INFO [master/d462fa545078:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T23:06:05,259 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,259 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T23:06:05,262 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d462fa545078:0:becomeActiveMaster-HFileCleaner.large.0-1733958365259,5,FailOnTimeoutGroup] 2024-12-11T23:06:05,262 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/d462fa545078:0:becomeActiveMaster-HFileCleaner.small.0-1733958365262,5,FailOnTimeoutGroup] 2024-12-11T23:06:05,262 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,262 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T23:06:05,262 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,262 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741831_1007 (size=1321) 2024-12-11T23:06:05,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741831_1007 (size=1321) 2024-12-11T23:06:05,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741831_1007 (size=1321) 2024-12-11T23:06:05,271 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T23:06:05,271 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52 2024-12-11T23:06:05,277 INFO [RS:0;d462fa545078:34161 
{}] regionserver.HRegionServer(746): ClusterId : 51a34bd0-e447-4211-be77-65154a128695 2024-12-11T23:06:05,277 INFO [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(746): ClusterId : 51a34bd0-e447-4211-be77-65154a128695 2024-12-11T23:06:05,277 DEBUG [RS:0;d462fa545078:34161 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T23:06:05,277 DEBUG [RS:2;d462fa545078:45549 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T23:06:05,280 INFO [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(746): ClusterId : 51a34bd0-e447-4211-be77-65154a128695 2024-12-11T23:06:05,280 DEBUG [RS:1;d462fa545078:37223 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T23:06:05,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741832_1008 (size=32) 2024-12-11T23:06:05,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741832_1008 (size=32) 2024-12-11T23:06:05,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741832_1008 (size=32) 2024-12-11T23:06:05,284 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:06:05,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T23:06:05,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T23:06:05,287 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:06:05,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T23:06:05,289 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T23:06:05,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:06:05,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T23:06:05,291 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T23:06:05,291 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:06:05,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T23:06:05,292 DEBUG [RS:2;d462fa545078:45549 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T23:06:05,292 DEBUG [RS:0;d462fa545078:34161 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T23:06:05,292 DEBUG [RS:2;d462fa545078:45549 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T23:06:05,293 DEBUG [RS:0;d462fa545078:34161 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T23:06:05,293 DEBUG [RS:1;d462fa545078:37223 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T23:06:05,293 DEBUG [RS:1;d462fa545078:37223 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T23:06:05,294 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T23:06:05,294 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:06:05,294 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T23:06:05,295 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740 2024-12-11T23:06:05,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740 2024-12-11T23:06:05,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T23:06:05,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T23:06:05,298 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-11T23:06:05,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T23:06:05,301 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:06:05,302 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69963039, jitterRate=0.042530521750450134}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T23:06:05,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733958365284Initializing all the Stores at 1733958365285 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958365285Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958365285Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958365285Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958365285Cleaning up temporary data from old regions at 1733958365297 (+12 ms)Region opened successfully at 1733958365303 (+6 ms) 2024-12-11T23:06:05,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T23:06:05,303 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T23:06:05,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T23:06:05,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T23:06:05,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T23:06:05,304 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T23:06:05,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733958365303Disabling compacts and flushes for region at 1733958365303Disabling writes for close at 1733958365303Writing 
region close event to WAL at 1733958365304 (+1 ms)Closed at 1733958365304 2024-12-11T23:06:05,305 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T23:06:05,305 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T23:06:05,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T23:06:05,307 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T23:06:05,308 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T23:06:05,314 DEBUG [RS:0;d462fa545078:34161 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T23:06:05,314 DEBUG [RS:2;d462fa545078:45549 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T23:06:05,314 DEBUG [RS:1;d462fa545078:37223 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T23:06:05,314 DEBUG [RS:0;d462fa545078:34161 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d1cc787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d462fa545078/172.17.0.2:0 2024-12-11T23:06:05,314 DEBUG [RS:1;d462fa545078:37223 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3565a677, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d462fa545078/172.17.0.2:0 2024-12-11T23:06:05,314 DEBUG [RS:2;d462fa545078:45549 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@773cbd75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d462fa545078/172.17.0.2:0 2024-12-11T23:06:05,323 DEBUG [RS:2;d462fa545078:45549 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;d462fa545078:45549 2024-12-11T23:06:05,323 DEBUG [RS:0;d462fa545078:34161 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d462fa545078:34161 2024-12-11T23:06:05,324 INFO [RS:0;d462fa545078:34161 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T23:06:05,324 INFO [RS:2;d462fa545078:45549 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T23:06:05,324 INFO [RS:0;d462fa545078:34161 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T23:06:05,324 INFO [RS:2;d462fa545078:45549 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T23:06:05,324 DEBUG [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T23:06:05,324 DEBUG [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T23:06:05,324 DEBUG [RS:1;d462fa545078:37223 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;d462fa545078:37223 2024-12-11T23:06:05,324 INFO [RS:1;d462fa545078:37223 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T23:06:05,324 INFO [RS:1;d462fa545078:37223 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T23:06:05,324 DEBUG [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T23:06:05,324 INFO [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(2659): reportForDuty to master=d462fa545078,41025,1733958364605 with port=45549, startcode=1733958364851 2024-12-11T23:06:05,324 INFO [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(2659): reportForDuty to master=d462fa545078,41025,1733958364605 with port=34161, startcode=1733958364772 2024-12-11T23:06:05,325 DEBUG [RS:2;d462fa545078:45549 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T23:06:05,325 DEBUG [RS:0;d462fa545078:34161 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T23:06:05,325 INFO [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(2659): reportForDuty to master=d462fa545078,41025,1733958364605 with port=37223, startcode=1733958364809 2024-12-11T23:06:05,325 DEBUG [RS:1;d462fa545078:37223 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T23:06:05,327 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45605, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T23:06:05,327 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36181, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T23:06:05,327 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48359, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T23:06:05,328 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41025 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d462fa545078,37223,1733958364809 2024-12-11T23:06:05,328 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41025 {}] master.ServerManager(517): Registering regionserver=d462fa545078,37223,1733958364809 2024-12-11T23:06:05,330 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41025 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d462fa545078,45549,1733958364851 2024-12-11T23:06:05,330 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41025 {}] master.ServerManager(517): Registering regionserver=d462fa545078,45549,1733958364851 2024-12-11T23:06:05,330 DEBUG [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52 2024-12-11T23:06:05,330 DEBUG [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45825 2024-12-11T23:06:05,330 DEBUG [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T23:06:05,332 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41025 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d462fa545078,34161,1733958364772 2024-12-11T23:06:05,332 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41025 {}] master.ServerManager(517): Registering regionserver=d462fa545078,34161,1733958364772 2024-12-11T23:06:05,332 DEBUG [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52 2024-12-11T23:06:05,332 DEBUG [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45825 2024-12-11T23:06:05,332 DEBUG [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T23:06:05,334 DEBUG [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52 2024-12-11T23:06:05,334 DEBUG [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45825 2024-12-11T23:06:05,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:06:05,334 DEBUG [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T23:06:05,375 DEBUG [RS:1;d462fa545078:37223 {}] zookeeper.ZKUtil(111): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d462fa545078,37223,1733958364809 2024-12-11T23:06:05,375 WARN [RS:1;d462fa545078:37223 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T23:06:05,375 INFO [RS:1;d462fa545078:37223 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T23:06:05,376 DEBUG [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,37223,1733958364809 2024-12-11T23:06:05,397 DEBUG [RS:2;d462fa545078:45549 {}] zookeeper.ZKUtil(111): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d462fa545078,45549,1733958364851 2024-12-11T23:06:05,397 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d462fa545078,45549,1733958364851] 2024-12-11T23:06:05,397 WARN [RS:2;d462fa545078:45549 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T23:06:05,397 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d462fa545078,34161,1733958364772] 2024-12-11T23:06:05,397 INFO [RS:2;d462fa545078:45549 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T23:06:05,397 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d462fa545078,37223,1733958364809] 2024-12-11T23:06:05,398 DEBUG [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,45549,1733958364851 2024-12-11T23:06:05,398 DEBUG [RS:0;d462fa545078:34161 {}] zookeeper.ZKUtil(111): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d462fa545078,34161,1733958364772 2024-12-11T23:06:05,398 WARN [RS:0;d462fa545078:34161 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T23:06:05,398 INFO [RS:0;d462fa545078:34161 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T23:06:05,398 DEBUG [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,34161,1733958364772 2024-12-11T23:06:05,400 INFO [RS:1;d462fa545078:37223 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T23:06:05,405 INFO [RS:0;d462fa545078:34161 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T23:06:05,405 INFO [RS:2;d462fa545078:45549 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T23:06:05,405 INFO [RS:1;d462fa545078:37223 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T23:06:05,405 INFO [RS:1;d462fa545078:37223 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T23:06:05,406 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,406 INFO [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T23:06:05,407 INFO [RS:1;d462fa545078:37223 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T23:06:05,407 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T23:06:05,407 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,407 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,407 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,408 INFO [RS:0;d462fa545078:34161 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d462fa545078:0, corePoolSize=2, maxPoolSize=2 2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,408 INFO [RS:0;d462fa545078:34161 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,408 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,408 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:06:05,409 DEBUG [RS:1;d462fa545078:37223 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:06:05,409 INFO [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T23:06:05,410 INFO [RS:0;d462fa545078:34161 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T23:06:05,410 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d462fa545078:0, corePoolSize=2, maxPoolSize=2 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,410 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,411 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,411 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): 
Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,411 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:06:05,411 DEBUG [RS:0;d462fa545078:34161 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:06:05,412 INFO [RS:2;d462fa545078:45549 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T23:06:05,418 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:2;d462fa545078:45549 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T23:06:05,418 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,37223,1733958364809-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T23:06:05,418 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,419 INFO [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T23:06:05,419 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,34161,1733958364772-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-11T23:06:05,420 INFO [RS:2;d462fa545078:45549 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T23:06:05,420 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d462fa545078:0, corePoolSize=2, maxPoolSize=2 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,420 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,421 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,421 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d462fa545078:0, corePoolSize=1, maxPoolSize=1 2024-12-11T23:06:05,421 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:06:05,421 DEBUG [RS:2;d462fa545078:45549 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0, corePoolSize=3, maxPoolSize=3 2024-12-11T23:06:05,425 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,425 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-11T23:06:05,425 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,425 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,425 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,425 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,45549,1733958364851-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T23:06:05,436 INFO [RS:2;d462fa545078:45549 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T23:06:05,437 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,45549,1733958364851-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,437 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,437 INFO [RS:2;d462fa545078:45549 {}] regionserver.Replication(171): d462fa545078,45549,1733958364851 started 2024-12-11T23:06:05,437 INFO [RS:0;d462fa545078:34161 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T23:06:05,437 INFO [RS:1;d462fa545078:37223 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T23:06:05,437 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,34161,1733958364772-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,437 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,37223,1733958364809-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,438 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,438 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,438 INFO [RS:1;d462fa545078:37223 {}] regionserver.Replication(171): d462fa545078,37223,1733958364809 started 2024-12-11T23:06:05,438 INFO [RS:0;d462fa545078:34161 {}] regionserver.Replication(171): d462fa545078,34161,1733958364772 started 2024-12-11T23:06:05,448 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T23:06:05,448 INFO [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(1482): Serving as d462fa545078,45549,1733958364851, RpcServer on d462fa545078/172.17.0.2:45549, sessionid=0x1001720f46e0003 2024-12-11T23:06:05,448 DEBUG [RS:2;d462fa545078:45549 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T23:06:05,449 DEBUG [RS:2;d462fa545078:45549 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d462fa545078,45549,1733958364851 2024-12-11T23:06:05,449 DEBUG [RS:2;d462fa545078:45549 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,45549,1733958364851' 2024-12-11T23:06:05,449 DEBUG [RS:2;d462fa545078:45549 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T23:06:05,449 DEBUG [RS:2;d462fa545078:45549 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T23:06:05,450 DEBUG [RS:2;d462fa545078:45549 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T23:06:05,450 DEBUG [RS:2;d462fa545078:45549 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T23:06:05,450 DEBUG [RS:2;d462fa545078:45549 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d462fa545078,45549,1733958364851 2024-12-11T23:06:05,450 DEBUG [RS:2;d462fa545078:45549 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,45549,1733958364851' 2024-12-11T23:06:05,450 DEBUG [RS:2;d462fa545078:45549 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T23:06:05,450 DEBUG [RS:2;d462fa545078:45549 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T23:06:05,451 DEBUG [RS:2;d462fa545078:45549 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T23:06:05,451 INFO [RS:2;d462fa545078:45549 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T23:06:05,451 INFO [RS:2;d462fa545078:45549 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T23:06:05,456 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,456 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T23:06:05,456 INFO [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(1482): Serving as d462fa545078,37223,1733958364809, RpcServer on d462fa545078/172.17.0.2:37223, sessionid=0x1001720f46e0002 2024-12-11T23:06:05,456 INFO [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(1482): Serving as d462fa545078,34161,1733958364772, RpcServer on d462fa545078/172.17.0.2:34161, sessionid=0x1001720f46e0001 2024-12-11T23:06:05,456 DEBUG [RS:0;d462fa545078:34161 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T23:06:05,456 DEBUG [RS:1;d462fa545078:37223 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T23:06:05,456 DEBUG [RS:0;d462fa545078:34161 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d462fa545078,34161,1733958364772 2024-12-11T23:06:05,456 DEBUG [RS:1;d462fa545078:37223 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d462fa545078,37223,1733958364809 2024-12-11T23:06:05,456 DEBUG [RS:0;d462fa545078:34161 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,34161,1733958364772' 2024-12-11T23:06:05,456 DEBUG [RS:1;d462fa545078:37223 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,37223,1733958364809' 2024-12-11T23:06:05,456 DEBUG [RS:0;d462fa545078:34161 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T23:06:05,456 DEBUG [RS:1;d462fa545078:37223 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T23:06:05,457 DEBUG [RS:0;d462fa545078:34161 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T23:06:05,457 DEBUG [RS:1;d462fa545078:37223 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T23:06:05,457 DEBUG [RS:0;d462fa545078:34161 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T23:06:05,457 DEBUG [RS:1;d462fa545078:37223 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T23:06:05,457 DEBUG [RS:0;d462fa545078:34161 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T23:06:05,457 DEBUG [RS:1;d462fa545078:37223 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T23:06:05,457 DEBUG [RS:0;d462fa545078:34161 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d462fa545078,34161,1733958364772 2024-12-11T23:06:05,457 DEBUG [RS:1;d462fa545078:37223 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d462fa545078,37223,1733958364809 2024-12-11T23:06:05,457 DEBUG [RS:0;d462fa545078:34161 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,34161,1733958364772' 2024-12-11T23:06:05,458 DEBUG [RS:1;d462fa545078:37223 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd462fa545078,37223,1733958364809' 2024-12-11T23:06:05,458 DEBUG [RS:0;d462fa545078:34161 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T23:06:05,458 DEBUG [RS:1;d462fa545078:37223 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T23:06:05,458 DEBUG [RS:1;d462fa545078:37223 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T23:06:05,458 DEBUG [RS:0;d462fa545078:34161 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T23:06:05,458 WARN [d462fa545078:41025 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-11T23:06:05,458 DEBUG [RS:1;d462fa545078:37223 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T23:06:05,458 INFO [RS:1;d462fa545078:37223 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T23:06:05,458 INFO [RS:1;d462fa545078:37223 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T23:06:05,458 DEBUG [RS:0;d462fa545078:34161 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T23:06:05,459 INFO [RS:0;d462fa545078:34161 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T23:06:05,459 INFO [RS:0;d462fa545078:34161 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T23:06:05,557 INFO [RS:2;d462fa545078:45549 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C45549%2C1733958364851, suffix=, logDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,45549,1733958364851, archiveDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/oldWALs, maxLogs=32 2024-12-11T23:06:05,561 INFO [RS:2;d462fa545078:45549 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d462fa545078%2C45549%2C1733958364851.1733958365561 2024-12-11T23:06:05,562 INFO [RS:1;d462fa545078:37223 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C37223%2C1733958364809, suffix=, logDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,37223,1733958364809, archiveDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/oldWALs, maxLogs=32 2024-12-11T23:06:05,562 INFO [RS:0;d462fa545078:34161 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C34161%2C1733958364772, suffix=, logDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,34161,1733958364772, archiveDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/oldWALs, maxLogs=32 2024-12-11T23:06:05,564 INFO [RS:0;d462fa545078:34161 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d462fa545078%2C34161%2C1733958364772.1733958365564 2024-12-11T23:06:05,564 INFO [RS:1;d462fa545078:37223 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d462fa545078%2C37223%2C1733958364809.1733958365564 2024-12-11T23:06:05,573 INFO [RS:2;d462fa545078:45549 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,45549,1733958364851/d462fa545078%2C45549%2C1733958364851.1733958365561 2024-12-11T23:06:05,575 DEBUG [RS:2;d462fa545078:45549 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:33039:33039),(127.0.0.1/127.0.0.1:39665:39665),(127.0.0.1/127.0.0.1:41685:41685)] 2024-12-11T23:06:05,576 INFO [RS:0;d462fa545078:34161 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,34161,1733958364772/d462fa545078%2C34161%2C1733958364772.1733958365564 2024-12-11T23:06:05,576 INFO [RS:1;d462fa545078:37223 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,37223,1733958364809/d462fa545078%2C37223%2C1733958364809.1733958365564 2024-12-11T23:06:05,577 DEBUG [RS:0;d462fa545078:34161 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41685:41685),(127.0.0.1/127.0.0.1:33039:33039),(127.0.0.1/127.0.0.1:39665:39665)] 2024-12-11T23:06:05,577 DEBUG [RS:1;d462fa545078:37223 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39665:39665),(127.0.0.1/127.0.0.1:33039:33039),(127.0.0.1/127.0.0.1:41685:41685)] 2024-12-11T23:06:05,709 DEBUG [d462fa545078:41025 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T23:06:05,709 DEBUG [d462fa545078:41025 {}] balancer.BalancerClusterState(204): Hosts are {d462fa545078=0} racks are {/default-rack=0} 2024-12-11T23:06:05,714 DEBUG [d462fa545078:41025 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:06:05,714 DEBUG [d462fa545078:41025 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:06:05,714 DEBUG [d462fa545078:41025 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:06:05,714 DEBUG [d462fa545078:41025 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:06:05,714 DEBUG [d462fa545078:41025 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:06:05,714 DEBUG [d462fa545078:41025 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:06:05,714 INFO [d462fa545078:41025 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:06:05,714 INFO [d462fa545078:41025 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:06:05,714 INFO [d462fa545078:41025 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:06:05,714 DEBUG [d462fa545078:41025 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:06:05,715 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d462fa545078,34161,1733958364772 2024-12-11T23:06:05,719 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d462fa545078,34161,1733958364772, state=OPENING 2024-12-11T23:06:05,733 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T23:06:05,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,745 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:05,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:06:05,746 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T23:06:05,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:06:05,747 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:06:05,747 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:06:05,747 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d462fa545078,34161,1733958364772}] 2024-12-11T23:06:05,904 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T23:06:05,906 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49517, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T23:06:05,911 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T23:06:05,911 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T23:06:05,914 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d462fa545078%2C34161%2C1733958364772.meta, suffix=.meta, logDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,34161,1733958364772, archiveDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/oldWALs, maxLogs=32 2024-12-11T23:06:05,916 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d462fa545078%2C34161%2C1733958364772.meta.1733958365915.meta 2024-12-11T23:06:05,924 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/WALs/d462fa545078,34161,1733958364772/d462fa545078%2C34161%2C1733958364772.meta.1733958365915.meta 2024-12-11T23:06:05,925 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39665:39665),(127.0.0.1/127.0.0.1:33039:33039),(127.0.0.1/127.0.0.1:41685:41685)] 2024-12-11T23:06:05,927 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T23:06:05,927 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T23:06:05,927 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T23:06:05,927 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-11T23:06:05,927 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T23:06:05,927 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:06:05,927 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T23:06:05,927 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T23:06:05,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T23:06:05,930 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T23:06:05,930 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:06:05,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T23:06:05,932 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T23:06:05,932 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:06:05,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T23:06:05,934 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T23:06:05,934 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:06:05,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T23:06:05,935 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T23:06:05,935 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:05,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T23:06:05,936 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T23:06:05,937 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740 2024-12-11T23:06:05,939 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740 2024-12-11T23:06:05,940 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T23:06:05,940 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T23:06:05,941 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-11T23:06:05,943 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T23:06:05,944 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73953054, jitterRate=0.10198637843132019}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T23:06:05,944 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T23:06:05,945 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733958365928Writing region info on filesystem at 1733958365928Initializing all the Stores at 1733958365929 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958365929Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958365929Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958365929Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733958365929Cleaning up temporary data from old regions at 1733958365940 (+11 ms)Running coprocessor post-open hooks at 1733958365944 (+4 ms)Region opened successfully at 1733958365945 (+1 ms) 2024-12-11T23:06:05,947 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733958365904 2024-12-11T23:06:05,950 DEBUG [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T23:06:05,950 INFO [RS_OPEN_META-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T23:06:05,951 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=d462fa545078,34161,1733958364772 2024-12-11T23:06:05,953 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d462fa545078,34161,1733958364772, state=OPEN 2024-12-11T23:06:05,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T23:06:05,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T23:06:05,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T23:06:05,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T23:06:05,965 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d462fa545078,34161,1733958364772 2024-12-11T23:06:05,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:06:05,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:06:05,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:06:05,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T23:06:05,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T23:06:05,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d462fa545078,34161,1733958364772 in 218 msec 2024-12-11T23:06:05,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T23:06:05,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 663 msec 2024-12-11T23:06:05,974 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T23:06:05,974 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T23:06:05,975 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:06:05,976 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=d462fa545078,34161,1733958364772, seqNum=-1] 2024-12-11T23:06:05,976 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:06:05,977 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59989, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:06:05,986 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 734 msec 2024-12-11T23:06:05,986 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733958365986, completionTime=-1 2024-12-11T23:06:05,986 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T23:06:05,986 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-11T23:06:05,988 INFO [master/d462fa545078:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T23:06:05,988 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733958425988 2024-12-11T23:06:05,988 INFO [master/d462fa545078:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733958485988 2024-12-11T23:06:05,988 INFO [master/d462fa545078:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-11T23:06:05,989 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,41025,1733958364605-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,989 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,41025,1733958364605-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,989 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,41025,1733958364605-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,989 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d462fa545078:41025, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,989 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T23:06:05,989 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
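At this point hbase:meta is online, the default and hbase namespaces are being created, and the master has seen all three region servers report in, so initialization can complete. This is the start-up phase a test normally drives by starting the minicluster; a minimal sketch of such a setup, assuming a JUnit harness similar to TestHBaseWalOnEC (class and field names are illustrative, not taken from the test source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.BeforeClass;

    public class MiniClusterSetupSketch {
      // Shared test utility; startMiniCluster(3) starts one master and three region
      // servers, matching the "expected min=3 server(s)" wait seen in the log above.
      protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpCluster() throws Exception {
        Configuration conf = UTIL.getConfiguration();
        // Test-specific settings (e.g. WAL or DFS erasure-coding options) would be
        // applied to conf here before the cluster starts.
        UTIL.startMiniCluster(3);
      }
    }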
2024-12-11T23:06:05,993 DEBUG [master/d462fa545078:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T23:06:05,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.070sec 2024-12-11T23:06:05,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T23:06:05,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T23:06:05,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T23:06:05,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T23:06:05,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T23:06:05,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,41025,1733958364605-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T23:06:05,995 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,41025,1733958364605-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T23:06:05,997 DEBUG [master/d462fa545078:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T23:06:05,997 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T23:06:05,998 INFO [master/d462fa545078:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d462fa545078,41025,1733958364605-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
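The entries that follow show the test client building its connection: it first asks the connection registry for the cluster id, then resolves the hbase:meta location and the active master stub. A minimal sketch of the equivalent public client call (configuration handling is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // createAsyncConnection() contacts the connection registry for the cluster id,
        // then resolves hbase:meta and the master address on demand -- the same
        // ClusterIdFetcher / ConnectionUtils sequence logged below.
        try (AsyncConnection connection = ConnectionFactory.createAsyncConnection(conf).get()) {
          AsyncAdmin admin = connection.getAdmin();
          // connection and admin are now ready for table operations.
        }
      }
    }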
2024-12-11T23:06:06,079 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7175264b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:06:06,079 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d462fa545078,41025,-1 for getting cluster id 2024-12-11T23:06:06,079 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:06:06,082 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '51a34bd0-e447-4211-be77-65154a128695' 2024-12-11T23:06:06,083 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:06:06,084 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "51a34bd0-e447-4211-be77-65154a128695" 2024-12-11T23:06:06,084 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@788ffd04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:06:06,085 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d462fa545078,41025,-1] 2024-12-11T23:06:06,085 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:06:06,086 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:06,088 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54166, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:06:06,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@442a2827, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:06:06,089 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:06:06,090 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d462fa545078,34161,1733958364772, seqNum=-1] 2024-12-11T23:06:06,091 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:06:06,093 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34188, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:06:06,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d462fa545078,41025,1733958364605 2024-12-11T23:06:06,097 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T23:06:06,098 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is d462fa545078,41025,1733958364605 2024-12-11T23:06:06,098 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@35574921 2024-12-11T23:06:06,098 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T23:06:06,100 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54176, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T23:06:06,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:06:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T23:06:06,105 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T23:06:06,105 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:06,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-11T23:06:06,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T23:06:06,107 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T23:06:06,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741837_1013 (size=392) 2024-12-11T23:06:06,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741837_1013 (size=392) 2024-12-11T23:06:06,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741837_1013 (size=392) 2024-12-11T23:06:06,117 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 594a3148205895043d8eee980c286c18, NAME => 'TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52 2024-12-11T23:06:06,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741838_1014 (size=51) 2024-12-11T23:06:06,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741838_1014 (size=51) 2024-12-11T23:06:06,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741838_1014 (size=51) 2024-12-11T23:06:06,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:06:06,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 594a3148205895043d8eee980c286c18, disabling compactions & flushes 2024-12-11T23:06:06,127 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 2024-12-11T23:06:06,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 2024-12-11T23:06:06,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. after waiting 0 ms 2024-12-11T23:06:06,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 2024-12-11T23:06:06,127 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 2024-12-11T23:06:06,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 594a3148205895043d8eee980c286c18: Waiting for close lock at 1733958366127Disabling compacts and flushes for region at 1733958366127Disabling writes for close at 1733958366127Writing region close event to WAL at 1733958366127Closed at 1733958366127 2024-12-11T23:06:06,129 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T23:06:06,129 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733958366129"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958366129"}]},"ts":"1733958366129"} 2024-12-11T23:06:06,132 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
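The CreateTableProcedure above was triggered by a client createTable request for 'TestHBaseWalOnEC' with a single family 'cf' (one version, region replication 1). A minimal sketch of the equivalent request through the Admin API; only the attributes visible in the descriptor dump are set explicitly, everything else is left at its default:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      // Creates a table equivalent to the descriptor dumped above: family 'cf',
      // one version, region replication 1.
      static void createTestTable(Admin admin) throws IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .build())
            .build();
        admin.createTable(desc);
      }
    }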
2024-12-11T23:06:06,134 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T23:06:06,134 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958366134"}]},"ts":"1733958366134"} 2024-12-11T23:06:06,136 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T23:06:06,137 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {d462fa545078=0} racks are {/default-rack=0} 2024-12-11T23:06:06,138 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:06:06,138 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:06:06,138 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:06:06,138 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:06:06,138 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:06:06,138 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:06:06,138 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:06:06,138 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:06:06,138 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:06:06,138 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:06:06,138 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=594a3148205895043d8eee980c286c18, ASSIGN}] 2024-12-11T23:06:06,140 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=594a3148205895043d8eee980c286c18, ASSIGN 2024-12-11T23:06:06,142 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=594a3148205895043d8eee980c286c18, ASSIGN; state=OFFLINE, location=d462fa545078,37223,1733958364809; forceNewPlan=false, retain=false 2024-12-11T23:06:06,178 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T23:06:06,179 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-11T23:06:06,181 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T23:06:06,181 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-11T23:06:06,182 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-11T23:06:06,182 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-11T23:06:06,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T23:06:06,293 INFO [d462fa545078:41025 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T23:06:06,294 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=594a3148205895043d8eee980c286c18, regionState=OPENING, regionLocation=d462fa545078,37223,1733958364809 2024-12-11T23:06:06,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=594a3148205895043d8eee980c286c18, ASSIGN because future has completed 2024-12-11T23:06:06,302 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 594a3148205895043d8eee980c286c18, server=d462fa545078,37223,1733958364809}] 2024-12-11T23:06:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T23:06:06,457 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T23:06:06,461 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36105, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T23:06:06,470 INFO [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 
2024-12-11T23:06:06,470 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 594a3148205895043d8eee980c286c18, NAME => 'TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18.', STARTKEY => '', ENDKEY => ''} 2024-12-11T23:06:06,471 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,471 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:06:06,471 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,471 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,473 INFO [StoreOpener-594a3148205895043d8eee980c286c18-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,475 INFO [StoreOpener-594a3148205895043d8eee980c286c18-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 594a3148205895043d8eee980c286c18 columnFamilyName cf 2024-12-11T23:06:06,475 DEBUG [StoreOpener-594a3148205895043d8eee980c286c18-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:06:06,476 INFO [StoreOpener-594a3148205895043d8eee980c286c18-1 {}] regionserver.HStore(327): Store=594a3148205895043d8eee980c286c18/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:06:06,476 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,477 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/default/TestHBaseWalOnEC/594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,478 DEBUG 
[RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/default/TestHBaseWalOnEC/594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,479 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,479 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,481 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,484 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/default/TestHBaseWalOnEC/594a3148205895043d8eee980c286c18/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:06:06,485 INFO [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 594a3148205895043d8eee980c286c18; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67645939, jitterRate=0.00800304114818573}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:06:06,485 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 594a3148205895043d8eee980c286c18 2024-12-11T23:06:06,486 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 594a3148205895043d8eee980c286c18: Running coprocessor pre-open hook at 1733958366471Writing region info on filesystem at 1733958366471Initializing all the Stores at 1733958366473 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958366473Cleaning up temporary data from old regions at 1733958366479 (+6 ms)Running coprocessor post-open hooks at 1733958366485 (+6 ms)Region opened successfully at 1733958366486 (+1 ms) 2024-12-11T23:06:06,487 INFO [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18., pid=6, masterSystemTime=1733958366457 2024-12-11T23:06:06,490 DEBUG [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 2024-12-11T23:06:06,490 INFO [RS_OPEN_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 
2024-12-11T23:06:06,492 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=594a3148205895043d8eee980c286c18, regionState=OPEN, openSeqNum=2, regionLocation=d462fa545078,37223,1733958364809 2024-12-11T23:06:06,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 594a3148205895043d8eee980c286c18, server=d462fa545078,37223,1733958364809 because future has completed 2024-12-11T23:06:06,500 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T23:06:06,500 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 594a3148205895043d8eee980c286c18, server=d462fa545078,37223,1733958364809 in 195 msec 2024-12-11T23:06:06,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T23:06:06,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=594a3148205895043d8eee980c286c18, ASSIGN in 362 msec 2024-12-11T23:06:06,506 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T23:06:06,506 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958366506"}]},"ts":"1733958366506"} 2024-12-11T23:06:06,509 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T23:06:06,511 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T23:06:06,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 410 msec 2024-12-11T23:06:06,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T23:06:06,734 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T23:06:06,734 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T23:06:06,735 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:06:06,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-11T23:06:06,738 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:06:06,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
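The "Waiting until all regions of table TestHBaseWalOnEC get assigned" entries above come from the test-utility helper that blocks until the new region is online in hbase:meta and on a region server. A minimal sketch, assuming the UTIL field from the setup sketch earlier and that the helper keeps the signature of its HBaseTestingUtility predecessor:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    public class WaitForAssignmentSketch {
      // Blocks until every region of the table is assigned, producing the
      // "All regions for table TestHBaseWalOnEC assigned" confirmation seen above.
      static void waitForTable(HBaseTestingUtil util) throws Exception {
        util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));
      }
    }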
2024-12-11T23:06:06,740 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18., hostname=d462fa545078,37223,1733958364809, seqNum=2] 2024-12-11T23:06:06,741 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:06:06,742 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:06:06,745 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T23:06:06,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T23:06:06,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T23:06:06,749 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T23:06:06,750 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T23:06:06,751 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T23:06:06,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T23:06:06,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37223 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T23:06:06,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 
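The region location lookup for row='row' and the master flush request above correspond to a single Put followed by an Admin flush from the test client. A minimal sketch; the cell value is illustrative, since the log only shows the 32-byte cell row/cf:cq:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      // Writes the single cell seen in the flush (row / cf:cq), then asks the master to
      // flush the table, which schedules the FlushTableProcedure (pid=7) shown above.
      static void putAndFlush(Connection connection, Admin admin) throws IOException {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = connection.getTable(name)) {
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
        }
        admin.flush(name);
      }
    }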
2024-12-11T23:06:06,908 INFO [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 594a3148205895043d8eee980c286c18 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T23:06:06,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/default/TestHBaseWalOnEC/594a3148205895043d8eee980c286c18/.tmp/cf/a19b099a0cba45a5974b4e06465bbdcb is 36, key is row/cf:cq/1733958366743/Put/seqid=0 2024-12-11T23:06:06,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741839_1015 (size=4787) 2024-12-11T23:06:06,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741839_1015 (size=4787) 2024-12-11T23:06:06,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741839_1015 (size=4787) 2024-12-11T23:06:06,940 INFO [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/default/TestHBaseWalOnEC/594a3148205895043d8eee980c286c18/.tmp/cf/a19b099a0cba45a5974b4e06465bbdcb 2024-12-11T23:06:06,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/default/TestHBaseWalOnEC/594a3148205895043d8eee980c286c18/.tmp/cf/a19b099a0cba45a5974b4e06465bbdcb as hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/default/TestHBaseWalOnEC/594a3148205895043d8eee980c286c18/cf/a19b099a0cba45a5974b4e06465bbdcb 2024-12-11T23:06:06,958 INFO [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/default/TestHBaseWalOnEC/594a3148205895043d8eee980c286c18/cf/a19b099a0cba45a5974b4e06465bbdcb, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T23:06:06,960 INFO [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 594a3148205895043d8eee980c286c18 in 52ms, sequenceid=5, compaction requested=false 2024-12-11T23:06:06,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 594a3148205895043d8eee980c286c18: 2024-12-11T23:06:06,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 
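After the flush commits the ~4.7 K HFile into the cf directory, the cell is served from the new store file rather than the memstore. A small sketch of how a test could read it back for verification (illustrative, not taken from the test source):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadBackSketch {
      // Reads row/cf:cq back; after the flush above the value comes from the newly
      // committed store file a19b099a0cba45a5974b4e06465bbdcb instead of the memstore.
      static byte[] readCell(Connection connection) throws IOException {
        try (Table table = connection.getTable(TableName.valueOf("TestHBaseWalOnEC"))) {
          Result result = table.get(new Get(Bytes.toBytes("row")));
          return result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("cq"));
        }
      }
    }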
2024-12-11T23:06:06,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d462fa545078:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T23:06:06,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T23:06:06,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T23:06:06,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 212 msec 2024-12-11T23:06:06,971 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 223 msec 2024-12-11T23:06:07,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41025 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T23:06:07,065 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T23:06:07,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T23:06:07,069 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T23:06:07,069 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:07,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:07,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:07,070 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:06:07,070 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T23:06:07,070 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1829026373, stopped=false 2024-12-11T23:06:07,070 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d462fa545078,41025,1733958364605 2024-12-11T23:06:07,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:07,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:07,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:07,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:06:07,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:07,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, 
quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:07,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:07,188 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T23:06:07,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:07,188 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T23:06:07,189 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:07,189 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:07,189 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:07,189 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:07,189 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:07,190 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd462fa545078,34161,1733958364772' ***** 2024-12-11T23:06:07,190 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:06:07,190 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T23:06:07,190 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd462fa545078,37223,1733958364809' ***** 2024-12-11T23:06:07,190 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T23:06:07,190 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd462fa545078,45549,1733958364851' ***** 2024-12-11T23:06:07,191 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T23:06:07,191 INFO [RS:0;d462fa545078:34161 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T23:06:07,192 INFO [RS:2;d462fa545078:45549 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T23:06:07,192 INFO [RS:0;d462fa545078:34161 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T23:06:07,192 INFO [RS:2;d462fa545078:45549 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T23:06:07,192 INFO [RS:0;d462fa545078:34161 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
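The stack traces above show the shutdown being driven from TestHBaseWalOnEC.tearDown(), which closes the shared connection and calls HBaseTestingUtil.shutdownMiniCluster(); that produces the master shutdown, the removal of /hbase/running in ZooKeeper, and the "***** STOPPING region server" entries above. A minimal sketch of such a teardown; the @After placement is inferred from the JUnit frames, not confirmed from the test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      // Shutting the minicluster down stops the master first, which deletes
      // /hbase/running in ZooKeeper and asks every region server to stop.
      @After
      public void tearDown() throws Exception {
        UTIL.shutdownMiniCluster();
      }
    }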
2024-12-11T23:06:07,192 INFO [RS:1;d462fa545078:37223 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T23:06:07,192 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T23:06:07,192 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T23:06:07,193 INFO [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(959): stopping server d462fa545078,34161,1733958364772 2024-12-11T23:06:07,193 INFO [RS:2;d462fa545078:45549 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T23:06:07,192 INFO [RS:1;d462fa545078:37223 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T23:06:07,193 INFO [RS:0;d462fa545078:34161 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:06:07,192 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T23:06:07,193 INFO [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(959): stopping server d462fa545078,45549,1733958364851 2024-12-11T23:06:07,193 INFO [RS:1;d462fa545078:37223 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T23:06:07,193 INFO [RS:2;d462fa545078:45549 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:06:07,193 INFO [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(3091): Received CLOSE for 594a3148205895043d8eee980c286c18 2024-12-11T23:06:07,193 INFO [RS:0;d462fa545078:34161 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d462fa545078:34161. 2024-12-11T23:06:07,193 INFO [RS:2;d462fa545078:45549 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;d462fa545078:45549. 
2024-12-11T23:06:07,193 DEBUG [RS:0;d462fa545078:34161 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:07,193 DEBUG [RS:2;d462fa545078:45549 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:07,193 DEBUG [RS:0;d462fa545078:34161 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:07,193 DEBUG [RS:2;d462fa545078:45549 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:07,193 INFO [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(959): stopping server d462fa545078,37223,1733958364809 2024-12-11T23:06:07,193 INFO [RS:1;d462fa545078:37223 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:06:07,193 INFO [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(976): stopping server d462fa545078,45549,1733958364851; all regions closed. 
2024-12-11T23:06:07,193 INFO [RS:0;d462fa545078:34161 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T23:06:07,193 INFO [RS:1;d462fa545078:37223 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;d462fa545078:37223. 2024-12-11T23:06:07,193 INFO [RS:0;d462fa545078:34161 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T23:06:07,193 INFO [RS:0;d462fa545078:34161 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T23:06:07,193 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 594a3148205895043d8eee980c286c18, disabling compactions & flushes 2024-12-11T23:06:07,193 DEBUG [RS:1;d462fa545078:37223 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:07,194 INFO [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T23:06:07,194 INFO [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 2024-12-11T23:06:07,194 DEBUG [RS:1;d462fa545078:37223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:07,194 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 2024-12-11T23:06:07,194 INFO [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T23:06:07,194 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,194 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 
after waiting 0 ms 2024-12-11T23:06:07,194 DEBUG [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(1325): Online Regions={594a3148205895043d8eee980c286c18=TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18.} 2024-12-11T23:06:07,194 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 2024-12-11T23:06:07,194 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,194 INFO [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T23:06:07,194 DEBUG [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(1351): Waiting on 594a3148205895043d8eee980c286c18 2024-12-11T23:06:07,194 DEBUG [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T23:06:07,194 DEBUG [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T23:06:07,194 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T23:06:07,194 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,194 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T23:06:07,194 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,194 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T23:06:07,194 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,194 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T23:06:07,194 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T23:06:07,195 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-11T23:06:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741833_1009 (size=93) 2024-12-11T23:06:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741833_1009 (size=93) 2024-12-11T23:06:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741833_1009 (size=93) 2024-12-11T23:06:07,201 DEBUG [RS:2;d462fa545078:45549 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/oldWALs 2024-12-11T23:06:07,201 INFO [RS:2;d462fa545078:45549 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d462fa545078%2C45549%2C1733958364851:(num 1733958365561) 2024-12-11T23:06:07,201 DEBUG [RS:2;d462fa545078:45549 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:07,202 INFO [RS:2;d462fa545078:45549 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:07,202 DEBUG 
[RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/default/TestHBaseWalOnEC/594a3148205895043d8eee980c286c18/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T23:06:07,202 INFO [RS:2;d462fa545078:45549 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:06:07,202 INFO [RS:2;d462fa545078:45549 {}] hbase.ChoreService(370): Chore service for: regionserver/d462fa545078:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T23:06:07,202 INFO [RS:2;d462fa545078:45549 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T23:06:07,202 INFO [regionserver/d462fa545078:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T23:06:07,202 INFO [RS:2;d462fa545078:45549 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T23:06:07,202 INFO [RS:2;d462fa545078:45549 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T23:06:07,202 INFO [RS:2;d462fa545078:45549 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:06:07,202 INFO [RS:2;d462fa545078:45549 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45549 2024-12-11T23:06:07,203 INFO [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 2024-12-11T23:06:07,203 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 594a3148205895043d8eee980c286c18: Waiting for close lock at 1733958367193Running coprocessor pre-close hooks at 1733958367193Disabling compacts and flushes for region at 1733958367193Disabling writes for close at 1733958367194 (+1 ms)Writing region close event to WAL at 1733958367197 (+3 ms)Running coprocessor post-close hooks at 1733958367202 (+5 ms)Closed at 1733958367203 (+1 ms) 2024-12-11T23:06:07,203 DEBUG [RS_CLOSE_REGION-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18. 
2024-12-11T23:06:07,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:06:07,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d462fa545078,45549,1733958364851 2024-12-11T23:06:07,208 INFO [RS:2;d462fa545078:45549 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:06:07,213 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/.tmp/info/f2a3f919cea14c8a8e733169ed186021 is 153, key is TestHBaseWalOnEC,,1733958366101.594a3148205895043d8eee980c286c18./info:regioninfo/1733958366491/Put/seqid=0 2024-12-11T23:06:07,218 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d462fa545078,45549,1733958364851] 2024-12-11T23:06:07,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741840_1016 (size=6637) 2024-12-11T23:06:07,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741840_1016 (size=6637) 2024-12-11T23:06:07,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741840_1016 (size=6637) 2024-12-11T23:06:07,220 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/.tmp/info/f2a3f919cea14c8a8e733169ed186021 2024-12-11T23:06:07,220 INFO [regionserver/d462fa545078:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:07,220 INFO [regionserver/d462fa545078:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:07,227 INFO [regionserver/d462fa545078:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:07,229 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d462fa545078,45549,1733958364851 already deleted, retry=false 2024-12-11T23:06:07,229 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d462fa545078,45549,1733958364851 expired; onlineServers=2 2024-12-11T23:06:07,243 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/.tmp/ns/cf32ae6d8bfb4e94920dc3b930f2f4e6 is 43, key is default/ns:d/1733958365978/Put/seqid=0 2024-12-11T23:06:07,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741841_1017 (size=5153) 2024-12-11T23:06:07,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to 
blk_1073741841_1017 (size=5153) 2024-12-11T23:06:07,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741841_1017 (size=5153) 2024-12-11T23:06:07,250 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/.tmp/ns/cf32ae6d8bfb4e94920dc3b930f2f4e6 2024-12-11T23:06:07,273 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/.tmp/table/31a8311c094449e0bfd0687891ef5f82 is 52, key is TestHBaseWalOnEC/table:state/1733958366506/Put/seqid=0 2024-12-11T23:06:07,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741842_1018 (size=5249) 2024-12-11T23:06:07,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741842_1018 (size=5249) 2024-12-11T23:06:07,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741842_1018 (size=5249) 2024-12-11T23:06:07,280 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/.tmp/table/31a8311c094449e0bfd0687891ef5f82 2024-12-11T23:06:07,287 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/.tmp/info/f2a3f919cea14c8a8e733169ed186021 as hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/info/f2a3f919cea14c8a8e733169ed186021 2024-12-11T23:06:07,294 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/info/f2a3f919cea14c8a8e733169ed186021, entries=10, sequenceid=11, filesize=6.5 K 2024-12-11T23:06:07,295 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/.tmp/ns/cf32ae6d8bfb4e94920dc3b930f2f4e6 as hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/ns/cf32ae6d8bfb4e94920dc3b930f2f4e6 2024-12-11T23:06:07,301 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/ns/cf32ae6d8bfb4e94920dc3b930f2f4e6, entries=2, sequenceid=11, filesize=5.0 K 2024-12-11T23:06:07,302 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/.tmp/table/31a8311c094449e0bfd0687891ef5f82 as hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/table/31a8311c094449e0bfd0687891ef5f82 2024-12-11T23:06:07,309 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/table/31a8311c094449e0bfd0687891ef5f82, entries=2, sequenceid=11, filesize=5.1 K 2024-12-11T23:06:07,310 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=11, compaction requested=false 2024-12-11T23:06:07,315 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-11T23:06:07,316 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T23:06:07,316 INFO [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T23:06:07,317 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733958367194Running coprocessor pre-close hooks at 1733958367194Disabling compacts and flushes for region at 1733958367194Disabling writes for close at 1733958367194Obtaining lock to block concurrent updates at 1733958367195 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733958367195Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733958367195Flushing stores of hbase:meta,,1.1588230740 at 1733958367196 (+1 ms)Flushing 1588230740/info: creating writer at 1733958367196Flushing 1588230740/info: appending metadata at 1733958367212 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733958367212Flushing 1588230740/ns: creating writer at 1733958367227 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733958367243 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733958367243Flushing 1588230740/table: creating writer at 1733958367258 (+15 ms)Flushing 1588230740/table: appending metadata at 1733958367272 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733958367272Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fb0eaba: reopening flushed file at 1733958367286 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1220ccfc: reopening flushed file at 1733958367294 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fc44135: reopening flushed file at 1733958367301 (+7 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 116ms, sequenceid=11, compaction requested=false at 1733958367310 (+9 
ms)Writing region close event to WAL at 1733958367311 (+1 ms)Running coprocessor post-close hooks at 1733958367316 (+5 ms)Closed at 1733958367316 2024-12-11T23:06:07,317 DEBUG [RS_CLOSE_META-regionserver/d462fa545078:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T23:06:07,318 INFO [RS:2;d462fa545078:45549 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:06:07,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:07,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45549-0x1001720f46e0003, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:07,318 INFO [RS:2;d462fa545078:45549 {}] regionserver.HRegionServer(1031): Exiting; stopping=d462fa545078,45549,1733958364851; zookeeper connection closed. 2024-12-11T23:06:07,319 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@71fbabec {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@71fbabec 2024-12-11T23:06:07,394 INFO [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(976): stopping server d462fa545078,37223,1733958364809; all regions closed. 2024-12-11T23:06:07,394 INFO [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(976): stopping server d462fa545078,34161,1733958364772; all regions closed. 2024-12-11T23:06:07,395 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,395 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,395 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,395 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,395 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,395 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,395 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,395 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,395 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,395 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741836_1012 (size=2751) 2024-12-11T23:06:07,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741835_1011 (size=1298) 2024-12-11T23:06:07,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741836_1012 (size=2751) 2024-12-11T23:06:07,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741836_1012 (size=2751) 2024-12-11T23:06:07,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741835_1011 (size=1298) 2024-12-11T23:06:07,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741835_1011 (size=1298) 2024-12-11T23:06:07,401 DEBUG [RS:1;d462fa545078:37223 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/oldWALs 2024-12-11T23:06:07,401 INFO [RS:1;d462fa545078:37223 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d462fa545078%2C37223%2C1733958364809:(num 1733958365564) 2024-12-11T23:06:07,401 DEBUG [RS:1;d462fa545078:37223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:07,402 INFO [RS:1;d462fa545078:37223 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:07,402 DEBUG [RS:0;d462fa545078:34161 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/oldWALs 2024-12-11T23:06:07,402 INFO [RS:1;d462fa545078:37223 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:06:07,402 INFO [RS:0;d462fa545078:34161 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d462fa545078%2C34161%2C1733958364772.meta:.meta(num 1733958365915) 2024-12-11T23:06:07,402 INFO [RS:1;d462fa545078:37223 {}] hbase.ChoreService(370): Chore service for: regionserver/d462fa545078:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T23:06:07,402 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,402 INFO [RS:1;d462fa545078:37223 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T23:06:07,402 INFO [regionserver/d462fa545078:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T23:06:07,402 INFO [RS:1;d462fa545078:37223 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T23:06:07,402 INFO [RS:1;d462fa545078:37223 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T23:06:07,402 INFO [RS:1;d462fa545078:37223 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:06:07,402 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,402 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,402 INFO [RS:1;d462fa545078:37223 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37223 2024-12-11T23:06:07,402 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,403 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741834_1010 (size=93) 2024-12-11T23:06:07,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741834_1010 (size=93) 2024-12-11T23:06:07,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741834_1010 (size=93) 2024-12-11T23:06:07,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:06:07,408 INFO [RS:1;d462fa545078:37223 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:06:07,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d462fa545078,37223,1733958364809 2024-12-11T23:06:07,408 DEBUG [RS:0;d462fa545078:34161 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/oldWALs 2024-12-11T23:06:07,408 INFO [RS:0;d462fa545078:34161 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d462fa545078%2C34161%2C1733958364772:(num 1733958365564) 2024-12-11T23:06:07,408 DEBUG [RS:0;d462fa545078:34161 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:06:07,409 INFO [RS:0;d462fa545078:34161 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:06:07,409 INFO [RS:0;d462fa545078:34161 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:06:07,409 INFO [RS:0;d462fa545078:34161 {}] hbase.ChoreService(370): Chore service for: regionserver/d462fa545078:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T23:06:07,409 INFO [RS:0;d462fa545078:34161 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:06:07,409 INFO [regionserver/d462fa545078:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T23:06:07,409 INFO [RS:0;d462fa545078:34161 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34161 2024-12-11T23:06:07,418 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d462fa545078,37223,1733958364809] 2024-12-11T23:06:07,429 INFO [RS:0;d462fa545078:34161 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:06:07,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:06:07,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d462fa545078,34161,1733958364772 2024-12-11T23:06:07,429 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$377/0x00007f263c8f8290@53dfe973 rejected from java.util.concurrent.ThreadPoolExecutor@57e067d0[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-11T23:06:07,439 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d462fa545078,37223,1733958364809 already deleted, retry=false 2024-12-11T23:06:07,439 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d462fa545078,37223,1733958364809 expired; onlineServers=1 2024-12-11T23:06:07,450 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d462fa545078,34161,1733958364772] 2024-12-11T23:06:07,460 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d462fa545078,34161,1733958364772 already deleted, retry=false 2024-12-11T23:06:07,460 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d462fa545078,34161,1733958364772 expired; onlineServers=0 2024-12-11T23:06:07,460 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd462fa545078,41025,1733958364605' ***** 2024-12-11T23:06:07,460 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T23:06:07,461 INFO [M:0;d462fa545078:41025 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:06:07,461 INFO [M:0;d462fa545078:41025 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:06:07,461 DEBUG [M:0;d462fa545078:41025 {}] cleaner.LogCleaner(198): Cancelling 
LogCleaner 2024-12-11T23:06:07,461 DEBUG [M:0;d462fa545078:41025 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T23:06:07,461 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-11T23:06:07,461 DEBUG [master/d462fa545078:0:becomeActiveMaster-HFileCleaner.large.0-1733958365259 {}] cleaner.HFileCleaner(306): Exit Thread[master/d462fa545078:0:becomeActiveMaster-HFileCleaner.large.0-1733958365259,5,FailOnTimeoutGroup] 2024-12-11T23:06:07,461 DEBUG [master/d462fa545078:0:becomeActiveMaster-HFileCleaner.small.0-1733958365262 {}] cleaner.HFileCleaner(306): Exit Thread[master/d462fa545078:0:becomeActiveMaster-HFileCleaner.small.0-1733958365262,5,FailOnTimeoutGroup] 2024-12-11T23:06:07,461 INFO [M:0;d462fa545078:41025 {}] hbase.ChoreService(370): Chore service for: master/d462fa545078:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T23:06:07,461 INFO [M:0;d462fa545078:41025 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:06:07,461 DEBUG [M:0;d462fa545078:41025 {}] master.HMaster(1795): Stopping service threads 2024-12-11T23:06:07,461 INFO [M:0;d462fa545078:41025 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T23:06:07,462 INFO [M:0;d462fa545078:41025 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T23:06:07,462 INFO [M:0;d462fa545078:41025 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T23:06:07,462 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-11T23:06:07,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T23:06:07,471 DEBUG [M:0;d462fa545078:41025 {}] zookeeper.ZKUtil(347): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T23:06:07,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:06:07,471 WARN [M:0;d462fa545078:41025 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T23:06:07,472 INFO [M:0;d462fa545078:41025 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/.lastflushedseqids 2024-12-11T23:06:07,474 WARN [IPC Server handler 3 on default port 45825 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T23:06:07,474 WARN [IPC Server handler 3 on default port 45825 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 
but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T23:06:07,474 WARN [IPC Server handler 3 on default port 45825 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T23:06:07,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741843_1019 (size=127) 2024-12-11T23:06:07,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741843_1019 (size=127) 2024-12-11T23:06:07,479 INFO [M:0;d462fa545078:41025 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T23:06:07,480 INFO [M:0;d462fa545078:41025 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T23:06:07,480 DEBUG [M:0;d462fa545078:41025 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T23:06:07,480 INFO [M:0;d462fa545078:41025 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:06:07,480 DEBUG [M:0;d462fa545078:41025 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:06:07,480 DEBUG [M:0;d462fa545078:41025 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T23:06:07,480 DEBUG [M:0;d462fa545078:41025 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T23:06:07,480 INFO [M:0;d462fa545078:41025 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-11T23:06:07,499 DEBUG [M:0;d462fa545078:41025 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8737e17a4f4144688394870a56089c8f is 82, key is hbase:meta,,1/info:regioninfo/1733958365951/Put/seqid=0 2024-12-11T23:06:07,501 WARN [IPC Server handler 2 on default port 45825 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T23:06:07,501 WARN [IPC Server handler 2 on default port 45825 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T23:06:07,501 WARN [IPC Server handler 2 on default port 45825 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T23:06:07,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741844_1020 (size=5672) 2024-12-11T23:06:07,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741844_1020 (size=5672) 2024-12-11T23:06:07,506 INFO [M:0;d462fa545078:41025 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8737e17a4f4144688394870a56089c8f 2024-12-11T23:06:07,519 INFO [RS:1;d462fa545078:37223 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:06:07,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:07,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37223-0x1001720f46e0002, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:07,519 INFO [RS:1;d462fa545078:37223 {}] regionserver.HRegionServer(1031): Exiting; stopping=d462fa545078,37223,1733958364809; zookeeper connection closed. 
2024-12-11T23:06:07,519 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4ec228b9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4ec228b9 2024-12-11T23:06:07,528 DEBUG [M:0;d462fa545078:41025 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/65fb22b445ee4cb4b285b6919b71d357 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733958366513/Put/seqid=0 2024-12-11T23:06:07,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741845_1021 (size=6438) 2024-12-11T23:06:07,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741845_1021 (size=6438) 2024-12-11T23:06:07,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741845_1021 (size=6438) 2024-12-11T23:06:07,536 INFO [M:0;d462fa545078:41025 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/65fb22b445ee4cb4b285b6919b71d357 2024-12-11T23:06:07,550 INFO [RS:0;d462fa545078:34161 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:06:07,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:07,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34161-0x1001720f46e0001, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:07,550 INFO [RS:0;d462fa545078:34161 {}] regionserver.HRegionServer(1031): Exiting; stopping=d462fa545078,34161,1733958364772; zookeeper connection closed. 
2024-12-11T23:06:07,550 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3b827f6b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3b827f6b 2024-12-11T23:06:07,551 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T23:06:07,558 DEBUG [M:0;d462fa545078:41025 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ce7c535e856843ee9aebfd6f8e2a64c5 is 69, key is d462fa545078,34161,1733958364772/rs:state/1733958365332/Put/seqid=0 2024-12-11T23:06:07,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741846_1022 (size=5294) 2024-12-11T23:06:07,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741846_1022 (size=5294) 2024-12-11T23:06:07,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741846_1022 (size=5294) 2024-12-11T23:06:07,565 INFO [M:0;d462fa545078:41025 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ce7c535e856843ee9aebfd6f8e2a64c5 2024-12-11T23:06:07,574 DEBUG [M:0;d462fa545078:41025 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8737e17a4f4144688394870a56089c8f as hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8737e17a4f4144688394870a56089c8f 2024-12-11T23:06:07,583 INFO [M:0;d462fa545078:41025 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8737e17a4f4144688394870a56089c8f, entries=8, sequenceid=72, filesize=5.5 K 2024-12-11T23:06:07,584 DEBUG [M:0;d462fa545078:41025 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/65fb22b445ee4cb4b285b6919b71d357 as hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/65fb22b445ee4cb4b285b6919b71d357 2024-12-11T23:06:07,592 INFO [M:0;d462fa545078:41025 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/65fb22b445ee4cb4b285b6919b71d357, entries=8, sequenceid=72, filesize=6.3 K 2024-12-11T23:06:07,594 DEBUG [M:0;d462fa545078:41025 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ce7c535e856843ee9aebfd6f8e2a64c5 as 
hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ce7c535e856843ee9aebfd6f8e2a64c5 2024-12-11T23:06:07,602 INFO [M:0;d462fa545078:41025 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45825/user/jenkins/test-data/71e03a5b-847b-9b17-7183-257bd3965c52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ce7c535e856843ee9aebfd6f8e2a64c5, entries=3, sequenceid=72, filesize=5.2 K 2024-12-11T23:06:07,604 INFO [M:0;d462fa545078:41025 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=72, compaction requested=false 2024-12-11T23:06:07,606 INFO [M:0;d462fa545078:41025 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:06:07,606 DEBUG [M:0;d462fa545078:41025 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733958367480Disabling compacts and flushes for region at 1733958367480Disabling writes for close at 1733958367480Obtaining lock to block concurrent updates at 1733958367480Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733958367480Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733958367481 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733958367482 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733958367482Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733958367499 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733958367499Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733958367513 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733958367528 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733958367528Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733958367544 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733958367558 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733958367558Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5222a845: reopening flushed file at 1733958367573 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16b9ccf9: reopening flushed file at 1733958367583 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44e2e330: reopening flushed file at 1733958367592 (+9 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=72, compaction requested=false at 1733958367604 (+12 ms)Writing region close event to WAL at 1733958367606 (+2 ms)Closed at 1733958367606 2024-12-11T23:06:07,606 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,606 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,606 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,606 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,606 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T23:06:07,609 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36109 is added to blk_1073741830_1006 (size=32665) 2024-12-11T23:06:07,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37845 is added to blk_1073741830_1006 (size=32665) 2024-12-11T23:06:07,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45409 is added to blk_1073741830_1006 (size=32665) 2024-12-11T23:06:07,610 INFO [M:0;d462fa545078:41025 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-11T23:06:07,610 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T23:06:07,610 INFO [M:0;d462fa545078:41025 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41025 2024-12-11T23:06:07,610 INFO [M:0;d462fa545078:41025 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:06:07,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:07,718 INFO [M:0;d462fa545078:41025 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:06:07,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41025-0x1001720f46e0000, quorum=127.0.0.1:49347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:06:07,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b49a11e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:06:07,722 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@465cf8e8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:06:07,722 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:06:07,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5422797{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:06:07,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eb20002{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.log.dir/,STOPPED} 2024-12-11T23:06:07,725 WARN [BP-529397267-172.17.0.2-1733958362006 heartbeating to localhost/127.0.0.1:45825 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T23:06:07,725 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T23:06:07,725 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T23:06:07,725 WARN [BP-529397267-172.17.0.2-1733958362006 heartbeating to localhost/127.0.0.1:45825 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-529397267-172.17.0.2-1733958362006 (Datanode Uuid 0400299d-6aef-4ca3-bb0b-086f4671412c) service to localhost/127.0.0.1:45825 2024-12-11T23:06:07,726 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data5/current/BP-529397267-172.17.0.2-1733958362006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:07,727 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data6/current/BP-529397267-172.17.0.2-1733958362006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:07,727 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T23:06:07,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@577871b5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:06:07,731 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@211473f7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:06:07,731 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:06:07,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43ceb76a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:06:07,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44b888f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.log.dir/,STOPPED} 2024-12-11T23:06:07,733 WARN [BP-529397267-172.17.0.2-1733958362006 heartbeating to localhost/127.0.0.1:45825 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T23:06:07,733 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T23:06:07,733 WARN [BP-529397267-172.17.0.2-1733958362006 heartbeating to localhost/127.0.0.1:45825 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-529397267-172.17.0.2-1733958362006 (Datanode Uuid 2eac4407-729b-4c54-b584-649af68dbefd) service to localhost/127.0.0.1:45825 2024-12-11T23:06:07,733 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T23:06:07,734 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data3/current/BP-529397267-172.17.0.2-1733958362006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:07,734 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data4/current/BP-529397267-172.17.0.2-1733958362006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:07,735 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T23:06:07,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@798a0a47{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:06:07,737 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10e8516e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:06:07,737 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:06:07,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c42eb95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:06:07,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f37eae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.log.dir/,STOPPED} 2024-12-11T23:06:07,744 WARN [BP-529397267-172.17.0.2-1733958362006 heartbeating to localhost/127.0.0.1:45825 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T23:06:07,744 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T23:06:07,744 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T23:06:07,744 WARN [BP-529397267-172.17.0.2-1733958362006 heartbeating to localhost/127.0.0.1:45825 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-529397267-172.17.0.2-1733958362006 (Datanode Uuid 7b9ee8bb-91c3-43f3-968e-1d01661ae6fe) service to localhost/127.0.0.1:45825 2024-12-11T23:06:07,744 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data1/current/BP-529397267-172.17.0.2-1733958362006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:07,745 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/cluster_bd64fffc-9a63-aa3e-8136-145a3a917bc8/data/data2/current/BP-529397267-172.17.0.2-1733958362006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:06:07,745 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T23:06:07,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3306e0d7{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T23:06:07,750 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@28dc8530{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:06:07,750 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:06:07,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce053d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:06:07,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43c9e816{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acfb63bd-822a-f905-a05e-700328eb95b8/hadoop.log.dir/,STOPPED} 2024-12-11T23:06:07,757 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-11T23:06:07,780 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-11T23:06:07,787 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=155 (was 93) - Thread LEAK? -, OpenFileDescriptor=518 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=365 (was 362) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=9296 (was 9494)