2024-12-12 05:43:03,911 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-12 05:43:03,922 main DEBUG Took 0.008815 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-12 05:43:03,922 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-12 05:43:03,922 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-12 05:43:03,923 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-12 05:43:03,924 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,932 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-12 05:43:03,948 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,949 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,950 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,950 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,950 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,950 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,951 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,951 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,952 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,952 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,953 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,953 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,953 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,953 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,954 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,954 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,954 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,955 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,955 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,955 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,956 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,956 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,956 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,956 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 05:43:03,957 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,957 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-12 05:43:03,958 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 05:43:03,959 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-12 05:43:03,961 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-12 05:43:03,961 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-12 05:43:03,962 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-12 05:43:03,963 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-12 05:43:03,970 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-12 05:43:03,973 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-12 05:43:03,974 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-12 05:43:03,974 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-12 05:43:03,975 main DEBUG createAppenders(={Console})
2024-12-12 05:43:03,976 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-12-12 05:43:03,976 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-12 05:43:03,976 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-12-12 05:43:03,976 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-12 05:43:03,977 main DEBUG OutputStream closed
2024-12-12 05:43:03,977 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-12 05:43:03,977 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-12 05:43:03,977 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-12-12 05:43:04,038 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-12 05:43:04,040 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-12 05:43:04,041 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-12 05:43:04,042 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-12 05:43:04,043 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-12 05:43:04,043 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-12 05:43:04,043 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-12 05:43:04,043 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-12 05:43:04,044 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-12 05:43:04,044 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-12 05:43:04,044 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-12 05:43:04,044 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-12 05:43:04,045 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-12 05:43:04,045 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-12 05:43:04,045 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-12 05:43:04,045 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-12 05:43:04,046 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-12 05:43:04,046 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-12 05:43:04,048 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-12 05:43:04,049 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-12-12 05:43:04,049 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-12 05:43:04,050 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-12-12T05:43:04,064 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-12-12 05:43:04,066 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-12 05:43:04,066 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-12T05:43:04,263 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7
2024-12-12T05:43:04,285 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8, deleteOnExit=true
2024-12-12T05:43:04,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/test.cache.data in system properties and HBase conf
2024-12-12T05:43:04,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.tmp.dir in system properties and HBase conf
2024-12-12T05:43:04,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir in system properties and HBase conf
2024-12-12T05:43:04,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-12T05:43:04,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-12T05:43:04,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-12T05:43:04,372 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-12T05:43:04,452 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-12T05:43:04,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-12T05:43:04,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-12T05:43:04,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-12T05:43:04,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-12T05:43:04,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-12T05:43:04,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-12T05:43:04,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-12T05:43:04,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-12T05:43:04,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-12T05:43:04,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/nfs.dump.dir in system properties and HBase conf
2024-12-12T05:43:04,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/java.io.tmpdir in system properties and HBase conf
2024-12-12T05:43:04,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-12T05:43:04,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-12T05:43:04,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-12T05:43:05,388 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-12T05:43:05,451 INFO [Time-limited test {}] log.Log(170): Logging initialized @2077ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-12T05:43:05,512 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T05:43:05,565 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T05:43:05,582 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T05:43:05,582 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T05:43:05,583 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-12T05:43:05,594 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T05:43:05,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27f57d6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir/,AVAILABLE}
2024-12-12T05:43:05,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@410292bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T05:43:05,755 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ea2dca6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/java.io.tmpdir/jetty-localhost-42725-hadoop-hdfs-3_4_1-tests_jar-_-any-9088131388934945944/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-12T05:43:05,762 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ed9b238{HTTP/1.1, (http/1.1)}{localhost:42725}
2024-12-12T05:43:05,762 INFO [Time-limited test {}] server.Server(415): Started @2389ms
2024-12-12T05:43:06,252 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T05:43:06,259 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T05:43:06,260 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T05:43:06,260 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T05:43:06,260 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-12T05:43:06,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d0819de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir/,AVAILABLE}
2024-12-12T05:43:06,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54f91ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T05:43:06,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d1a7cf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/java.io.tmpdir/jetty-localhost-45271-hadoop-hdfs-3_4_1-tests_jar-_-any-11955519668790015594/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T05:43:06,356 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20b70ca3{HTTP/1.1, (http/1.1)}{localhost:45271}
2024-12-12T05:43:06,356 INFO [Time-limited test {}] server.Server(415): Started @2983ms
2024-12-12T05:43:06,399 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-12T05:43:06,496 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T05:43:06,500 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T05:43:06,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T05:43:06,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T05:43:06,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-12T05:43:06,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37e44dc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir/,AVAILABLE}
2024-12-12T05:43:06,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fb4f3a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T05:43:06,609 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1548acd1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/java.io.tmpdir/jetty-localhost-33787-hadoop-hdfs-3_4_1-tests_jar-_-any-15832350504664279951/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T05:43:06,609 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3dc20694{HTTP/1.1, (http/1.1)}{localhost:33787}
2024-12-12T05:43:06,610 INFO [Time-limited test {}] server.Server(415): Started @3236ms
2024-12-12T05:43:06,612 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-12T05:43:06,642 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T05:43:06,647 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T05:43:06,648 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T05:43:06,648 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T05:43:06,649 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-12T05:43:06,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@656f7043{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir/,AVAILABLE}
2024-12-12T05:43:06,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17c0da3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T05:43:06,746 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3297a183{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/java.io.tmpdir/jetty-localhost-34767-hadoop-hdfs-3_4_1-tests_jar-_-any-9571135164422561182/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T05:43:06,748 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b8a83a2{HTTP/1.1, (http/1.1)}{localhost:34767}
2024-12-12T05:43:06,748 INFO [Time-limited test {}] server.Server(415): Started @3375ms
2024-12-12T05:43:06,750 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-12T05:43:07,686 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data3/current/BP-1389929639-172.17.0.2-1733982184928/current, will proceed with Du for space computation calculation,
2024-12-12T05:43:07,686 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data1/current/BP-1389929639-172.17.0.2-1733982184928/current, will proceed with Du for space computation calculation,
2024-12-12T05:43:07,686 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data4/current/BP-1389929639-172.17.0.2-1733982184928/current, will proceed with Du for space computation calculation,
2024-12-12T05:43:07,686 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data2/current/BP-1389929639-172.17.0.2-1733982184928/current, will proceed with Du for space computation calculation,
2024-12-12T05:43:07,712 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-12T05:43:07,712 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-12T05:43:07,729 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data5/current/BP-1389929639-172.17.0.2-1733982184928/current, will proceed with Du for space computation calculation,
2024-12-12T05:43:07,729 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data6/current/BP-1389929639-172.17.0.2-1733982184928/current, will proceed with Du for space computation calculation,
2024-12-12T05:43:07,757 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb64e65e986a1cf3a with lease ID 0xf3824db32a1e63d: Processing first storage report for DS-ddb8856d-57f7-45c2-91fb-04818fd5a78e from datanode DatanodeRegistration(127.0.0.1:33191, datanodeUuid=7969e3e7-c778-41c1-b5c4-b84e08e6e234, infoPort=46739, infoSecurePort=0, ipcPort=40705, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928)
2024-12-12T05:43:07,757 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-12T05:43:07,758 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb64e65e986a1cf3a with lease ID 0xf3824db32a1e63d: from storage DS-ddb8856d-57f7-45c2-91fb-04818fd5a78e node DatanodeRegistration(127.0.0.1:33191, datanodeUuid=7969e3e7-c778-41c1-b5c4-b84e08e6e234, infoPort=46739, infoSecurePort=0, ipcPort=40705, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-12T05:43:07,758 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4ea4fa06ab3c365 with lease ID 0xf3824db32a1e63e: Processing first storage report for DS-8c41a03f-cf3c-4b28-83b2-28d905d86ef7 from datanode DatanodeRegistration(127.0.0.1:38943, datanodeUuid=c227c6b4-17df-4e8f-8bfd-f6cbf1de9c03, infoPort=46255, infoSecurePort=0, ipcPort=45169, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928)
2024-12-12T05:43:07,758 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4ea4fa06ab3c365 with lease ID 0xf3824db32a1e63e: from storage DS-8c41a03f-cf3c-4b28-83b2-28d905d86ef7 node DatanodeRegistration(127.0.0.1:38943, datanodeUuid=c227c6b4-17df-4e8f-8bfd-f6cbf1de9c03, infoPort=46255, infoSecurePort=0, ipcPort=45169, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-12T05:43:07,759 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb64e65e986a1cf3a with lease ID 0xf3824db32a1e63d: Processing first storage report for DS-4ccc6523-6ae1-42e0-a995-2f74d96cdd96 from datanode DatanodeRegistration(127.0.0.1:33191, datanodeUuid=7969e3e7-c778-41c1-b5c4-b84e08e6e234, infoPort=46739, infoSecurePort=0, ipcPort=40705, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928)
2024-12-12T05:43:07,759 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb64e65e986a1cf3a with lease ID 0xf3824db32a1e63d: from storage DS-4ccc6523-6ae1-42e0-a995-2f74d96cdd96 node DatanodeRegistration(127.0.0.1:33191, datanodeUuid=7969e3e7-c778-41c1-b5c4-b84e08e6e234, infoPort=46739, infoSecurePort=0, ipcPort=40705, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-12T05:43:07,759 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4ea4fa06ab3c365 with lease ID 0xf3824db32a1e63e: Processing first storage report for DS-9a78e849-8f41-43a2-a934-5ac32b05cbff from datanode DatanodeRegistration(127.0.0.1:38943, datanodeUuid=c227c6b4-17df-4e8f-8bfd-f6cbf1de9c03, infoPort=46255, infoSecurePort=0, ipcPort=45169, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928)
2024-12-12T05:43:07,759 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4ea4fa06ab3c365 with lease ID 0xf3824db32a1e63e: from storage DS-9a78e849-8f41-43a2-a934-5ac32b05cbff node DatanodeRegistration(127.0.0.1:38943, datanodeUuid=c227c6b4-17df-4e8f-8bfd-f6cbf1de9c03, infoPort=46255, infoSecurePort=0, ipcPort=45169, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-12T05:43:07,763 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e9489b0fcec53b1 with lease ID 0xf3824db32a1e63f: Processing first storage report for DS-2c630ba7-2833-4220-b393-aaa6265a4135 from datanode DatanodeRegistration(127.0.0.1:43639, datanodeUuid=b556d465-4299-4bd3-b90e-6bc00d999d14, infoPort=43201, infoSecurePort=0, ipcPort=41369, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928)
2024-12-12T05:43:07,763 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e9489b0fcec53b1 with lease ID 0xf3824db32a1e63f: from storage DS-2c630ba7-2833-4220-b393-aaa6265a4135 node DatanodeRegistration(127.0.0.1:43639, datanodeUuid=b556d465-4299-4bd3-b90e-6bc00d999d14, infoPort=43201, infoSecurePort=0, ipcPort=41369, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-12T05:43:07,763 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e9489b0fcec53b1 with lease ID 0xf3824db32a1e63f: Processing first storage report for DS-6c3c39a8-7b90-4505-8732-ad50b170c5df from datanode DatanodeRegistration(127.0.0.1:43639, datanodeUuid=b556d465-4299-4bd3-b90e-6bc00d999d14, infoPort=43201, infoSecurePort=0, ipcPort=41369, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928)
2024-12-12T05:43:07,763 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e9489b0fcec53b1 with lease ID 0xf3824db32a1e63f: from storage DS-6c3c39a8-7b90-4505-8732-ad50b170c5df node DatanodeRegistration(127.0.0.1:43639, datanodeUuid=b556d465-4299-4bd3-b90e-6bc00d999d14, infoPort=43201, infoSecurePort=0, ipcPort=41369, storageInfo=lv=-57;cid=testClusterID;nsid=601652731;c=1733982184928), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-12T05:43:07,834 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7
2024-12-12T05:43:07,897 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-12-12T05:43:07,942 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=298, ProcessCount=11, AvailableMemoryMB=9817
2024-12-12T05:43:07,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-12T05:43:07,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-12T05:43:08,026 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/zookeeper_0, clientPort=51134, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-12T05:43:08,037 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51134
2024-12-12T05:43:08,051 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:08,054 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:08,145 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-12T05:43:08,146 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-12T05:43:08,186 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:60132 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:43639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60132 dst: /127.0.0.1:43639
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:43:08,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-12T05:43:08,606 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-12T05:43:08,619 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec with version=8
2024-12-12T05:43:08,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/hbase-staging
2024-12-12T05:43:08,700 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-12T05:43:08,925 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ffbfd3107920:0 server-side Connection retries=45
2024-12-12T05:43:08,933 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:08,934 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:08,938 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T05:43:08,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:08,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T05:43:09,056 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-12T05:43:09,110 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-12T05:43:09,118 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-12T05:43:09,122 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T05:43:09,145 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 6953 (auto-detected)
2024-12-12T05:43:09,146 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-12T05:43:09,164 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33367
2024-12-12T05:43:09,181 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33367 connecting to ZooKeeper ensemble=127.0.0.1:51134
2024-12-12T05:43:09,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:333670x0, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T05:43:09,284 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33367-0x100189a30850000 connected
2024-12-12T05:43:09,361 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:09,368 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:09,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T05:43:09,381 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec, hbase.cluster.distributed=false
2024-12-12T05:43:09,401 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T05:43:09,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33367
2024-12-12T05:43:09,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33367
2024-12-12T05:43:09,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33367
2024-12-12T05:43:09,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33367
2024-12-12T05:43:09,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33367
2024-12-12T05:43:09,498 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ffbfd3107920:0 server-side Connection retries=45
2024-12-12T05:43:09,499 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:09,499 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:09,500 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T05:43:09,500 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:09,500 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T05:43:09,502 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-12T05:43:09,504 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T05:43:09,505 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45027
2024-12-12T05:43:09,507 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45027 connecting to ZooKeeper ensemble=127.0.0.1:51134
2024-12-12T05:43:09,508 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:09,511 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:09,540 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:450270x0, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T05:43:09,541 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:450270x0, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T05:43:09,541 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45027-0x100189a30850001 connected
2024-12-12T05:43:09,547 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-12T05:43:09,555 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-12T05:43:09,558 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T05:43:09,563 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T05:43:09,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45027
2024-12-12T05:43:09,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45027
2024-12-12T05:43:09,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45027
2024-12-12T05:43:09,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45027
2024-12-12T05:43:09,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45027
2024-12-12T05:43:09,581 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ffbfd3107920:0 server-side Connection retries=45
2024-12-12T05:43:09,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:09,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:09,582 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T05:43:09,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:09,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T05:43:09,582 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-12T05:43:09,582 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T05:43:09,584 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46565
2024-12-12T05:43:09,586 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46565 connecting to ZooKeeper ensemble=127.0.0.1:51134
2024-12-12T05:43:09,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:09,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:09,598 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:465650x0, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T05:43:09,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46565-0x100189a30850002 connected
2024-12-12T05:43:09,599 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T05:43:09,599 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-12T05:43:09,600 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-12T05:43:09,601 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T05:43:09,603 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T05:43:09,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46565
2024-12-12T05:43:09,607 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46565
2024-12-12T05:43:09,607 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46565
2024-12-12T05:43:09,608 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46565
2024-12-12T05:43:09,608 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46565
2024-12-12T05:43:09,623 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ffbfd3107920:0 server-side Connection retries=45
2024-12-12T05:43:09,623 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:09,623 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:09,624 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T05:43:09,624 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T05:43:09,624 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T05:43:09,624 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-12T05:43:09,625 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T05:43:09,625 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39003
2024-12-12T05:43:09,627 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39003 connecting to ZooKeeper ensemble=127.0.0.1:51134
2024-12-12T05:43:09,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:09,631 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T05:43:09,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390030x0, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T05:43:09,640 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:390030x0, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T05:43:09,640 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39003-0x100189a30850003 connected
2024-12-12T05:43:09,641 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-12T05:43:09,642 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-12T05:43:09,642 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T05:43:09,644 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T05:43:09,645 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39003
2024-12-12T05:43:09,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39003
2024-12-12T05:43:09,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39003
2024-12-12T05:43:09,647 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39003
2024-12-12T05:43:09,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39003
2024-12-12T05:43:09,667 DEBUG [M:0;ffbfd3107920:33367 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ffbfd3107920:33367
2024-12-12T05:43:09,668 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ffbfd3107920,33367,1733982188783
2024-12-12T05:43:09,682 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T05:43:09,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T05:43:09,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T05:43:09,682
DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:09,685 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ffbfd3107920,33367,1733982188783 2024-12-12T05:43:09,714 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T05:43:09,714 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T05:43:09,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T05:43:09,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:09,715 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:09,715 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:09,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:09,716 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T05:43:09,717 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ffbfd3107920,33367,1733982188783 from backup master directory 2024-12-12T05:43:09,723 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:09,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ffbfd3107920,33367,1733982188783 2024-12-12T05:43:09,723 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:09,723 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:09,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:09,724 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T05:43:09,724 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ffbfd3107920,33367,1733982188783 2024-12-12T05:43:09,726 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-12T05:43:09,727 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-12T05:43:09,779 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/hbase.id] with ID: da28258d-af29-4d8e-a4e7-5af5dbf99d22 2024-12-12T05:43:09,779 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/.tmp/hbase.id 2024-12-12T05:43:09,786 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:09,786 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:09,789 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:44002 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:38943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44002 dst: /127.0.0.1:38943 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:09,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-12T05:43:09,796 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:09,796 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/.tmp/hbase.id]:[hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/hbase.id] 2024-12-12T05:43:09,841 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:09,846 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-12T05:43:09,863 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-12-12T05:43:09,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:09,890 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:09,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:09,890 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:09,903 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:09,903 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:09,906 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:44026 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:38943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44026 dst: /127.0.0.1:38943 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:09,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-12T05:43:09,912 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
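Annotation (not part of the captured log): the repeated DFSStripedOutputStream warnings above come from writing with the RS-3-2-1024k erasure coding policy on a mini cluster that has only three datanodes, while RS-3-2 needs five distinct nodes (three data blocks plus two parity blocks), so the parity writers never get a target and each block group is written incomplete. The sketch below is a minimal stand-alone check under that assumption, not code from this test: it uses the public DistributedFileSystem API to compare a path's erasure coding policy against the number of live datanodes. The hdfs://localhost:33553 URI and the path are taken from the log and would need adjusting for any other cluster.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // URI and path copied from the log; both are assumptions for other environments.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33553"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/user/jenkins/test-data");

      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy == null) {
        System.out.println(dir + " is replicated, not erasure coded");
        return;
      }

      // RS-3-2 needs dataUnits + parityUnits = 5 distinct datanodes per block group.
      int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
      DatanodeInfo[] live = dfs.getDataNodeStats();
      System.out.printf("policy=%s needs %d datanodes, cluster has %d live%n",
          policy.getName(), needed, live.length);
      if (live.length < needed) {
        System.out.println("Expect partial block groups; see 'hdfs ec -verifyClusterSetup'");
      }
    }
  }
}
```

This is the same conclusion the warning itself points at via `hdfs ec -verifyClusterSetup`; the Java form is shown only to keep the check self-contained.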
2024-12-12T05:43:09,926 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:43:09,928 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-12T05:43:09,934 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T05:43:09,960 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:09,960 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:09,963 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:60154 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:43639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60154 dst: /127.0.0.1:43639 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:09,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-12T05:43:09,969 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:09,983 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store 2024-12-12T05:43:09,998 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:09,998 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:10,001 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:60168 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60168 dst: /127.0.0.1:43639 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:10,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-12T05:43:10,005 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:10,009 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-12T05:43:10,012 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:10,014 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T05:43:10,014 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:10,014 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:10,016 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-12T05:43:10,017 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:10,017 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:10,018 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733982190014Disabling compacts and flushes for region at 1733982190014Disabling writes for close at 1733982190017 (+3 ms)Writing region close event to WAL at 1733982190017Closed at 1733982190017 2024-12-12T05:43:10,020 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/.initializing 2024-12-12T05:43:10,021 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/WALs/ffbfd3107920,33367,1733982188783 2024-12-12T05:43:10,030 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T05:43:10,043 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C33367%2C1733982188783, suffix=, logDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/WALs/ffbfd3107920,33367,1733982188783, archiveDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/oldWALs, maxLogs=10 2024-12-12T05:43:10,075 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/WALs/ffbfd3107920,33367,1733982188783/ffbfd3107920%2C33367%2C1733982188783.1733982190049, exclude list is [], retry=0 2024-12-12T05:43:10,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:10,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38943,DS-8c41a03f-cf3c-4b28-83b2-28d905d86ef7,DISK] 2024-12-12T05:43:10,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33191,DS-ddb8856d-57f7-45c2-91fb-04818fd5a78e,DISK] 2024-12-12T05:43:10,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43639,DS-2c630ba7-2833-4220-b393-aaa6265a4135,DISK] 2024-12-12T05:43:10,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-12T05:43:10,133 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/WALs/ffbfd3107920,33367,1733982188783/ffbfd3107920%2C33367%2C1733982188783.1733982190049 2024-12-12T05:43:10,134 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46255:46255),(127.0.0.1/127.0.0.1:43201:43201),(127.0.0.1/127.0.0.1:46739:46739)] 2024-12-12T05:43:10,135 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:43:10,135 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:10,138 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,138 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,170 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-12T05:43:10,194 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:10,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:10,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-12T05:43:10,201 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:10,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:43:10,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-12T05:43:10,205 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:10,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:43:10,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-12T05:43:10,210 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:10,211 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:43:10,212 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,215 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,216 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,220 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,221 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,224 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-12T05:43:10,227 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:10,233 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:43:10,234 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63191967, jitterRate=-0.0583663135766983}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T05:43:10,241 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733982190150Initializing all the Stores at 1733982190151 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982190152 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982190152Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982190152Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982190153 (+1 ms)Cleaning up temporary data from old regions at 1733982190221 (+68 ms)Region opened successfully at 1733982190240 (+19 ms) 2024-12-12T05:43:10,242 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-12T05:43:10,272 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73395edb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ffbfd3107920/172.17.0.2:0 2024-12-12T05:43:10,297 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-12T05:43:10,305 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-12T05:43:10,306 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-12T05:43:10,308 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-12T05:43:10,309 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-12T05:43:10,313 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-12T05:43:10,313 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-12T05:43:10,334 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-12T05:43:10,344 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-12T05:43:10,389 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-12T05:43:10,393 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-12T05:43:10,396 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-12T05:43:10,406 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-12T05:43:10,408 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-12T05:43:10,411 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-12T05:43:10,414 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-12T05:43:10,415 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-12T05:43:10,422 DEBUG [master/ffbfd3107920:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-12T05:43:10,439 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-12T05:43:10,447 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-12T05:43:10,456 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:10,456 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:10,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:10,456 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,456 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:10,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,461 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ffbfd3107920,33367,1733982188783, sessionid=0x100189a30850000, setting cluster-up flag (Was=false) 2024-12-12T05:43:10,489 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,489 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
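Annotation (not part of the captured log): the ZKUtil(113) records earlier ("Set watcher on znode that does not yet exist") and the NodeCreated events for /hbase/running above show the standard ZooKeeper pattern of registering a watch via exists() on a znode that is not there yet, then reacting once the active master creates it. Below is a minimal sketch of that pattern using the plain org.apache.zookeeper client; it is illustrative only, not HBase's internal ZKUtil, and the 127.0.0.1:51134 quorum address is simply the one from this log.

```java
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchUntilCreated {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);

    // Fires when the watched znode appears (NodeCreated), mirroring the
    // "Received ZooKeeper Event, type=NodeCreated, path=/hbase/running" records.
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/running".equals(event.getPath())) {
        created.countDown();
      }
    };

    ZooKeeper zk = new ZooKeeper("127.0.0.1:51134", 30_000, watcher);
    try {
      // exists() registers the watch even when the znode is absent, which is the
      // "Set watcher on znode that does not yet exist" situation ZKUtil logs.
      if (zk.exists("/hbase/running", watcher) == null) {
        created.await(); // woken up once the active master creates the znode
      }
      System.out.println("/hbase/running is present");
    } finally {
      zk.close();
    }
  }
}
```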
2024-12-12T05:43:10,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,515 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-12T05:43:10,520 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ffbfd3107920,33367,1733982188783 2024-12-12T05:43:10,539 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,539 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:10,564 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-12T05:43:10,567 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ffbfd3107920,33367,1733982188783 2024-12-12T05:43:10,575 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-12T05:43:10,637 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-12T05:43:10,645 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-12T05:43:10,650 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
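Annotation (not part of the captured log): the BaseLoadBalancer/StochasticLoadBalancer record above reports the knobs the master loaded (slop=0.2, maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false). As a rough sketch of how those values map onto configuration, the snippet below sets the corresponding properties on an HBase Configuration; the property names are the ones StochasticLoadBalancer reads in recent HBase releases, but treat them as assumptions and confirm against hbase-default.xml for your version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerConfigSketch {
  // Returns a Configuration carrying the balancer settings seen in the log.
  public static Configuration balancerConfig() {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.regions.slop", 0.2f);                               // slop=0.2
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);     // maxSteps
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);  // runMaxSteps
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);     // stepsPerRegion
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000); // ms per balance run
    conf.setBoolean("hbase.master.loadbalance.bytable", false);              // isByTable
    return conf;
  }
}
```

Setting the same properties in hbase-site.xml has the identical effect; the programmatic form is used here only to keep the example self-contained.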
2024-12-12T05:43:10,652 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(746): ClusterId : da28258d-af29-4d8e-a4e7-5af5dbf99d22 2024-12-12T05:43:10,652 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(746): ClusterId : da28258d-af29-4d8e-a4e7-5af5dbf99d22 2024-12-12T05:43:10,652 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(746): ClusterId : da28258d-af29-4d8e-a4e7-5af5dbf99d22 2024-12-12T05:43:10,654 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T05:43:10,654 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T05:43:10,654 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T05:43:10,655 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ffbfd3107920,33367,1733982188783 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-12T05:43:10,684 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T05:43:10,684 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T05:43:10,684 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T05:43:10,684 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T05:43:10,684 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ffbfd3107920:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:43:10,684 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ffbfd3107920:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:43:10,685 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ffbfd3107920:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:43:10,685 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ffbfd3107920:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:43:10,685 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ffbfd3107920:0, corePoolSize=10, maxPoolSize=10 2024-12-12T05:43:10,685 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,685 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ffbfd3107920:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:43:10,685 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,686 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T05:43:10,686 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T05:43:10,696 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-12T05:43:10,697 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-12T05:43:10,699 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T05:43:10,699 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T05:43:10,699 DEBUG [RS:1;ffbfd3107920:46565 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fc1570, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ffbfd3107920/172.17.0.2:0 2024-12-12T05:43:10,699 DEBUG [RS:2;ffbfd3107920:39003 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d6bc05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ffbfd3107920/172.17.0.2:0 2024-12-12T05:43:10,702 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:10,702 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T05:43:10,703 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T05:43:10,703 DEBUG [RS:0;ffbfd3107920:45027 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28d126fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ffbfd3107920/172.17.0.2:0 2024-12-12T05:43:10,713 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733982220713 2024-12-12T05:43:10,715 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-12T05:43:10,716 DEBUG [RS:2;ffbfd3107920:39003 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;ffbfd3107920:39003 2024-12-12T05:43:10,717 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-12T05:43:10,719 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:10,719 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:10,720 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-12T05:43:10,720 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-12T05:43:10,720 DEBUG [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-12T05:43:10,721 DEBUG [RS:1;ffbfd3107920:46565 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ffbfd3107920:46565 2024-12-12T05:43:10,721 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-12T05:43:10,722 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-12T05:43:10,722 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-12T05:43:10,722 DEBUG [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-12T05:43:10,722 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-12T05:43:10,722 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-12T05:43:10,722 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-12T05:43:10,723 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(2659): reportForDuty to master=ffbfd3107920,33367,1733982188783 with port=39003, startcode=1733982189622
2024-12-12T05:43:10,723 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(2659): reportForDuty to master=ffbfd3107920,33367,1733982188783 with port=46565, startcode=1733982189580
2024-12-12T05:43:10,725 DEBUG [RS:0;ffbfd3107920:45027 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ffbfd3107920:45027
2024-12-12T05:43:10,725 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-12T05:43:10,725 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-12T05:43:10,725 DEBUG [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-12T05:43:10,726 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(2659): reportForDuty to master=ffbfd3107920,33367,1733982188783 with port=45027, startcode=1733982189466
2024-12-12T05:43:10,727 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-12T05:43:10,734 DEBUG [RS:2;ffbfd3107920:39003 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-12T05:43:10,734 DEBUG [RS:0;ffbfd3107920:45027 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-12T05:43:10,734 DEBUG [RS:1;ffbfd3107920:46565 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-12T05:43:10,739 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-12T05:43:10,740 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-12T05:43:10,741 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-12T05:43:10,741 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:47848 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:33191:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47848 dst: /127.0.0.1:33191
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:43:10,747 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-12T05:43:10,747 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-12T05:43:10,755 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.large.0-1733982190749,5,FailOnTimeoutGroup]
2024-12-12T05:43:10,756 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.small.0-1733982190756,5,FailOnTimeoutGroup]
2024-12-12T05:43:10,758 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-12T05:43:10,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775712_1013 (size=1321)
2024-12-12T05:43:10,758 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-12T05:43:10,762 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-12T05:43:10,762 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-12T05:43:10,762 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-12T05:43:10,765 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-12T05:43:10,766 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec 2024-12-12T05:43:10,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-12T05:43:10,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-12T05:43:10,776 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39119, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T05:43:10,776 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45373, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T05:43:10,776 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55521, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T05:43:10,778 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-12T05:43:10,778 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-12T05:43:10,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:47874 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:33191:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47874 dst: /127.0.0.1:33191
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:43:10,787 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33367 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ffbfd3107920,46565,1733982189580
2024-12-12T05:43:10,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775696_1015 (size=32)
2024-12-12T05:43:10,790 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33367 {}] master.ServerManager(517): Registering regionserver=ffbfd3107920,46565,1733982189580
2024-12-12T05:43:10,791 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-12T05:43:10,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:10,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T05:43:10,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T05:43:10,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:10,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:10,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-12T05:43:10,803 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33367 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ffbfd3107920,39003,1733982189622 2024-12-12T05:43:10,803 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33367 {}] master.ServerManager(517): Registering regionserver=ffbfd3107920,39003,1733982189622 2024-12-12T05:43:10,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-12T05:43:10,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:10,807 DEBUG [RS:1;ffbfd3107920:46565 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec 2024-12-12T05:43:10,807 DEBUG [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33553 2024-12-12T05:43:10,807 DEBUG [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-12T05:43:10,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:10,808 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T05:43:10,808 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33367 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ffbfd3107920,45027,1733982189466 2024-12-12T05:43:10,808 DEBUG [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec 2024-12-12T05:43:10,809 DEBUG [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33553 2024-12-12T05:43:10,809 DEBUG [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-12T05:43:10,809 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33367 {}] master.ServerManager(517): Registering regionserver=ffbfd3107920,45027,1733982189466 2024-12-12T05:43:10,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T05:43:10,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:10,813 DEBUG [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec 2024-12-12T05:43:10,813 DEBUG [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33553 2024-12-12T05:43:10,813 DEBUG [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-12T05:43:10,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:10,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T05:43:10,816 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T05:43:10,816 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:10,817 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:10,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-12T05:43:10,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740 2024-12-12T05:43:10,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740 2024-12-12T05:43:10,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-12T05:43:10,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-12T05:43:10,823 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-12T05:43:10,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-12T05:43:10,829 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:43:10,830 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74913868, jitterRate=0.11630362272262573}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T05:43:10,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733982190793Initializing all the Stores at 1733982190796 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982190796Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982190796Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982190796Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982190796Cleaning up temporary data from old regions at 1733982190822 (+26 ms)Region opened successfully at 1733982190832 (+10 ms) 2024-12-12T05:43:10,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-12T05:43:10,833 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-12T05:43:10,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-12T05:43:10,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T05:43:10,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T05:43:10,834 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-12T05:43:10,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733982190833Disabling compacts and flushes for region at 1733982190833Disabling writes for close at 1733982190833Writing 
region close event to WAL at 1733982190834 (+1 ms)Closed at 1733982190834 2024-12-12T05:43:10,837 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-12T05:43:10,837 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-12T05:43:10,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T05:43:10,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-12T05:43:10,850 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T05:43:10,852 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-12T05:43:10,869 DEBUG [RS:1;ffbfd3107920:46565 {}] zookeeper.ZKUtil(111): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ffbfd3107920,46565,1733982189580 2024-12-12T05:43:10,870 WARN [RS:1;ffbfd3107920:46565 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T05:43:10,870 INFO [RS:1;ffbfd3107920:46565 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T05:43:10,870 DEBUG [RS:0;ffbfd3107920:45027 {}] zookeeper.ZKUtil(111): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ffbfd3107920,45027,1733982189466 2024-12-12T05:43:10,870 DEBUG [RS:2;ffbfd3107920:39003 {}] zookeeper.ZKUtil(111): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ffbfd3107920,39003,1733982189622 2024-12-12T05:43:10,870 WARN [RS:0;ffbfd3107920:45027 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T05:43:10,870 WARN [RS:2;ffbfd3107920:39003 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-12T05:43:10,870 DEBUG [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,46565,1733982189580 2024-12-12T05:43:10,870 INFO [RS:0;ffbfd3107920:45027 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T05:43:10,870 INFO [RS:2;ffbfd3107920:39003 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T05:43:10,870 DEBUG [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,45027,1733982189466 2024-12-12T05:43:10,871 DEBUG [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,39003,1733982189622 2024-12-12T05:43:10,872 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ffbfd3107920,39003,1733982189622] 2024-12-12T05:43:10,873 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ffbfd3107920,45027,1733982189466] 2024-12-12T05:43:10,873 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ffbfd3107920,46565,1733982189580] 2024-12-12T05:43:10,896 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T05:43:10,896 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T05:43:10,896 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T05:43:10,907 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T05:43:10,907 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T05:43:10,908 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T05:43:10,913 INFO [RS:1;ffbfd3107920:46565 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T05:43:10,913 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:10,913 INFO [RS:0;ffbfd3107920:45027 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T05:43:10,913 INFO [RS:2;ffbfd3107920:39003 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T05:43:10,913 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,913 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,915 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-12T05:43:10,915 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-12T05:43:10,915 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-12T05:43:10,920 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-12T05:43:10,920 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-12T05:43:10,920 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-12T05:43:10,922 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,922 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,922 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:10,922 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ffbfd3107920:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:43:10,922 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ffbfd3107920:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:43:10,922 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,922 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 
2024-12-12T05:43:10,923 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ffbfd3107920:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:43:10,923 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:10,923 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:1;ffbfd3107920:46565 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:10,923 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): 
Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,923 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:10,924 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:10,924 DEBUG [RS:2;ffbfd3107920:39003 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:10,924 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:10,924 DEBUG [RS:0;ffbfd3107920:45027 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:10,928 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,928 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,928 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,928 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,928 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,928 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,46565,1733982189580-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T05:43:10,930 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,930 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,930 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,930 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,931 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,931 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,45027,1733982189466-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-12T05:43:10,931 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,931 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,931 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,931 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,931 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,931 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39003,1733982189622-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T05:43:10,949 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-12T05:43:10,949 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-12T05:43:10,951 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,45027,1733982189466-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,951 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39003,1733982189622-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,951 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,951 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-12T05:43:10,951 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,951 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.Replication(171): ffbfd3107920,45027,1733982189466 started 2024-12-12T05:43:10,951 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.Replication(171): ffbfd3107920,39003,1733982189622 started 2024-12-12T05:43:10,951 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,46565,1733982189580-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,952 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,952 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.Replication(171): ffbfd3107920,46565,1733982189580 started 2024-12-12T05:43:10,976 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,976 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:10,976 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:10,976 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(1482): Serving as ffbfd3107920,45027,1733982189466, RpcServer on ffbfd3107920/172.17.0.2:45027, sessionid=0x100189a30850001 2024-12-12T05:43:10,976 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(1482): Serving as ffbfd3107920,46565,1733982189580, RpcServer on ffbfd3107920/172.17.0.2:46565, sessionid=0x100189a30850002 2024-12-12T05:43:10,976 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(1482): Serving as ffbfd3107920,39003,1733982189622, RpcServer on ffbfd3107920/172.17.0.2:39003, sessionid=0x100189a30850003 2024-12-12T05:43:10,977 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T05:43:10,977 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T05:43:10,977 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T05:43:10,977 DEBUG [RS:2;ffbfd3107920:39003 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ffbfd3107920,39003,1733982189622 2024-12-12T05:43:10,977 DEBUG [RS:0;ffbfd3107920:45027 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ffbfd3107920,45027,1733982189466 2024-12-12T05:43:10,977 DEBUG [RS:1;ffbfd3107920:46565 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ffbfd3107920,46565,1733982189580 2024-12-12T05:43:10,977 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,45027,1733982189466' 2024-12-12T05:43:10,977 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,39003,1733982189622' 2024-12-12T05:43:10,977 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,46565,1733982189580' 2024-12-12T05:43:10,978 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T05:43:10,978 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T05:43:10,978 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T05:43:10,979 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T05:43:10,979 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T05:43:10,979 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T05:43:10,979 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T05:43:10,979 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T05:43:10,979 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 
2024-12-12T05:43:10,979 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T05:43:10,979 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T05:43:10,979 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T05:43:10,979 DEBUG [RS:0;ffbfd3107920:45027 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ffbfd3107920,45027,1733982189466 2024-12-12T05:43:10,979 DEBUG [RS:1;ffbfd3107920:46565 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ffbfd3107920,46565,1733982189580 2024-12-12T05:43:10,979 DEBUG [RS:2;ffbfd3107920:39003 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ffbfd3107920,39003,1733982189622 2024-12-12T05:43:10,979 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,45027,1733982189466' 2024-12-12T05:43:10,979 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,46565,1733982189580' 2024-12-12T05:43:10,979 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,39003,1733982189622' 2024-12-12T05:43:10,979 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T05:43:10,979 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T05:43:10,979 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T05:43:10,980 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T05:43:10,980 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T05:43:10,980 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T05:43:10,980 DEBUG [RS:0;ffbfd3107920:45027 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T05:43:10,981 INFO [RS:0;ffbfd3107920:45027 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T05:43:10,981 INFO [RS:0;ffbfd3107920:45027 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T05:43:10,981 DEBUG [RS:1;ffbfd3107920:46565 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T05:43:10,981 DEBUG [RS:2;ffbfd3107920:39003 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T05:43:10,981 INFO [RS:1;ffbfd3107920:46565 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T05:43:10,981 INFO [RS:2;ffbfd3107920:39003 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T05:43:10,981 INFO [RS:1;ffbfd3107920:46565 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-12T05:43:10,981 INFO [RS:2;ffbfd3107920:39003 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T05:43:11,003 WARN [ffbfd3107920:33367 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-12T05:43:11,090 INFO [RS:0;ffbfd3107920:45027 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T05:43:11,090 INFO [RS:1;ffbfd3107920:46565 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T05:43:11,090 INFO [RS:2;ffbfd3107920:39003 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T05:43:11,093 INFO [RS:0;ffbfd3107920:45027 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C45027%2C1733982189466, suffix=, logDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,45027,1733982189466, archiveDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/oldWALs, maxLogs=32 2024-12-12T05:43:11,093 INFO [RS:2;ffbfd3107920:39003 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C39003%2C1733982189622, suffix=, logDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,39003,1733982189622, archiveDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/oldWALs, maxLogs=32 2024-12-12T05:43:11,095 INFO [RS:1;ffbfd3107920:46565 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C46565%2C1733982189580, suffix=, logDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,46565,1733982189580, archiveDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/oldWALs, maxLogs=32 2024-12-12T05:43:11,108 DEBUG [RS:2;ffbfd3107920:39003 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,39003,1733982189622/ffbfd3107920%2C39003%2C1733982189622.1733982191096, exclude list is [], retry=0 2024-12-12T05:43:11,108 DEBUG [RS:1;ffbfd3107920:46565 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,46565,1733982189580/ffbfd3107920%2C46565%2C1733982189580.1733982191096, exclude list is [], retry=0 2024-12-12T05:43:11,110 DEBUG [RS:0;ffbfd3107920:45027 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,45027,1733982189466/ffbfd3107920%2C45027%2C1733982189466.1733982191096, exclude list is [], retry=0 2024-12-12T05:43:11,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43639,DS-2c630ba7-2833-4220-b393-aaa6265a4135,DISK] 2024-12-12T05:43:11,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:33191,DS-ddb8856d-57f7-45c2-91fb-04818fd5a78e,DISK] 2024-12-12T05:43:11,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38943,DS-8c41a03f-cf3c-4b28-83b2-28d905d86ef7,DISK] 2024-12-12T05:43:11,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38943,DS-8c41a03f-cf3c-4b28-83b2-28d905d86ef7,DISK] 2024-12-12T05:43:11,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33191,DS-ddb8856d-57f7-45c2-91fb-04818fd5a78e,DISK] 2024-12-12T05:43:11,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43639,DS-2c630ba7-2833-4220-b393-aaa6265a4135,DISK] 2024-12-12T05:43:11,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38943,DS-8c41a03f-cf3c-4b28-83b2-28d905d86ef7,DISK] 2024-12-12T05:43:11,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43639,DS-2c630ba7-2833-4220-b393-aaa6265a4135,DISK] 2024-12-12T05:43:11,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33191,DS-ddb8856d-57f7-45c2-91fb-04818fd5a78e,DISK] 2024-12-12T05:43:11,151 INFO [RS:0;ffbfd3107920:45027 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,45027,1733982189466/ffbfd3107920%2C45027%2C1733982189466.1733982191096 2024-12-12T05:43:11,151 INFO [RS:2;ffbfd3107920:39003 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,39003,1733982189622/ffbfd3107920%2C39003%2C1733982189622.1733982191096 2024-12-12T05:43:11,152 DEBUG [RS:0;ffbfd3107920:45027 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43201:43201),(127.0.0.1/127.0.0.1:46255:46255),(127.0.0.1/127.0.0.1:46739:46739)] 2024-12-12T05:43:11,152 DEBUG [RS:2;ffbfd3107920:39003 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46255:46255),(127.0.0.1/127.0.0.1:43201:43201),(127.0.0.1/127.0.0.1:46739:46739)] 2024-12-12T05:43:11,155 INFO [RS:1;ffbfd3107920:46565 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,46565,1733982189580/ffbfd3107920%2C46565%2C1733982189580.1733982191096 
2024-12-12T05:43:11,156 DEBUG [RS:1;ffbfd3107920:46565 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46739:46739),(127.0.0.1/127.0.0.1:46255:46255),(127.0.0.1/127.0.0.1:43201:43201)] 2024-12-12T05:43:11,259 DEBUG [ffbfd3107920:33367 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-12T05:43:11,269 DEBUG [ffbfd3107920:33367 {}] balancer.BalancerClusterState(204): Hosts are {ffbfd3107920=0} racks are {/default-rack=0} 2024-12-12T05:43:11,275 DEBUG [ffbfd3107920:33367 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T05:43:11,275 DEBUG [ffbfd3107920:33367 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T05:43:11,275 DEBUG [ffbfd3107920:33367 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T05:43:11,275 DEBUG [ffbfd3107920:33367 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T05:43:11,275 DEBUG [ffbfd3107920:33367 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T05:43:11,275 DEBUG [ffbfd3107920:33367 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T05:43:11,275 INFO [ffbfd3107920:33367 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T05:43:11,275 INFO [ffbfd3107920:33367 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T05:43:11,275 INFO [ffbfd3107920:33367 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T05:43:11,275 DEBUG [ffbfd3107920:33367 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T05:43:11,281 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ffbfd3107920,45027,1733982189466 2024-12-12T05:43:11,286 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ffbfd3107920,45027,1733982189466, state=OPENING 2024-12-12T05:43:11,339 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-12T05:43:11,348 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:11,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:11,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:11,348 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:11,350 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:11,350 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:11,351 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:11,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:11,354 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T05:43:11,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ffbfd3107920,45027,1733982189466}] 2024-12-12T05:43:11,531 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T05:43:11,534 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39103, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T05:43:11,545 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-12T05:43:11,546 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T05:43:11,546 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-12T05:43:11,549 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C45027%2C1733982189466.meta, suffix=.meta, logDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,45027,1733982189466, archiveDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/oldWALs, maxLogs=32 2024-12-12T05:43:11,566 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,45027,1733982189466/ffbfd3107920%2C45027%2C1733982189466.meta.1733982191551.meta, exclude list is [], retry=0 2024-12-12T05:43:11,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43639,DS-2c630ba7-2833-4220-b393-aaa6265a4135,DISK] 2024-12-12T05:43:11,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38943,DS-8c41a03f-cf3c-4b28-83b2-28d905d86ef7,DISK] 2024-12-12T05:43:11,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:33191,DS-ddb8856d-57f7-45c2-91fb-04818fd5a78e,DISK] 2024-12-12T05:43:11,572 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/WALs/ffbfd3107920,45027,1733982189466/ffbfd3107920%2C45027%2C1733982189466.meta.1733982191551.meta 2024-12-12T05:43:11,573 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46255:46255),(127.0.0.1/127.0.0.1:43201:43201),(127.0.0.1/127.0.0.1:46739:46739)] 2024-12-12T05:43:11,573 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:43:11,574 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-12T05:43:11,576 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-12T05:43:11,581 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-12T05:43:11,585 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-12T05:43:11,585 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:11,585 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-12T05:43:11,585 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-12T05:43:11,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T05:43:11,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T05:43:11,590 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:11,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:11,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-12T05:43:11,592 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-12T05:43:11,592 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:11,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:11,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T05:43:11,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T05:43:11,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:11,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:11,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T05:43:11,596 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T05:43:11,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:11,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:11,597 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-12T05:43:11,598 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740 2024-12-12T05:43:11,601 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740 2024-12-12T05:43:11,603 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-12T05:43:11,603 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-12T05:43:11,604 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-12T05:43:11,606 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-12T05:43:11,607 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65327633, jitterRate=-0.026542410254478455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T05:43:11,608 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-12T05:43:11,609 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733982191586Writing region info on filesystem at 1733982191586Initializing all the Stores at 1733982191588 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982191588Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982191588Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982191588Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982191588Cleaning up temporary data from old regions at 1733982191603 (+15 ms)Running coprocessor post-open hooks at 1733982191608 (+5 ms)Region opened successfully at 1733982191609 (+1 ms) 2024-12-12T05:43:11,615 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733982191522 2024-12-12T05:43:11,626 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-12T05:43:11,626 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-12T05:43:11,628 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=ffbfd3107920,45027,1733982189466 2024-12-12T05:43:11,630 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ffbfd3107920,45027,1733982189466, state=OPEN 2024-12-12T05:43:11,664 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:43:11,664 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:43:11,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:43:11,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:43:11,664 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:11,664 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:11,664 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:11,664 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:11,665 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ffbfd3107920,45027,1733982189466 2024-12-12T05:43:11,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-12T05:43:11,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ffbfd3107920,45027,1733982189466 in 309 msec 2024-12-12T05:43:11,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-12T05:43:11,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 834 msec 2024-12-12T05:43:11,682 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-12T05:43:11,682 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-12T05:43:11,699 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T05:43:11,700 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=ffbfd3107920,45027,1733982189466, seqNum=-1] 2024-12-12T05:43:11,716 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:43:11,718 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52415, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:43:11,735 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1310 sec 2024-12-12T05:43:11,735 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733982191735, completionTime=-1 2024-12-12T05:43:11,737 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-12T05:43:11,737 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-12T05:43:11,758 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-12T05:43:11,759 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733982251758 2024-12-12T05:43:11,759 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733982311759 2024-12-12T05:43:11,759 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 21 msec 2024-12-12T05:43:11,760 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-12T05:43:11,766 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,33367,1733982188783-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:11,766 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,33367,1733982188783-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:11,766 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,33367,1733982188783-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:11,768 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ffbfd3107920:33367, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:11,768 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:11,769 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:11,775 DEBUG [master/ffbfd3107920:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-12T05:43:11,797 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.073sec 2024-12-12T05:43:11,798 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-12T05:43:11,800 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-12T05:43:11,800 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-12T05:43:11,801 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-12T05:43:11,801 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-12T05:43:11,801 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,33367,1733982188783-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T05:43:11,802 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,33367,1733982188783-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-12T05:43:11,806 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-12T05:43:11,807 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-12T05:43:11,807 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,33367,1733982188783-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:11,861 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@745924a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:43:11,865 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-12T05:43:11,865 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-12T05:43:11,867 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ffbfd3107920,33367,-1 for getting cluster id 2024-12-12T05:43:11,869 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T05:43:11,896 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'da28258d-af29-4d8e-a4e7-5af5dbf99d22' 2024-12-12T05:43:11,899 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T05:43:11,899 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "da28258d-af29-4d8e-a4e7-5af5dbf99d22" 2024-12-12T05:43:11,899 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f1b3046, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:43:11,900 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ffbfd3107920,33367,-1] 2024-12-12T05:43:11,903 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T05:43:11,905 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:11,905 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35820, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T05:43:11,908 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b62236d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:43:11,909 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T05:43:11,916 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ffbfd3107920,45027,1733982189466, seqNum=-1] 2024-12-12T05:43:11,916 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:43:11,922 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:43:11,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=ffbfd3107920,33367,1733982188783 2024-12-12T05:43:11,949 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-12T05:43:11,953 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is ffbfd3107920,33367,1733982188783 2024-12-12T05:43:11,955 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@42f1365a 2024-12-12T05:43:11,956 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T05:43:11,958 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35836, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T05:43:11,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:43:11,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-12T05:43:11,971 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T05:43:11,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-12T05:43:11,973 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:11,975 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T05:43:11,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T05:43:11,983 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:11,983 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-12T05:43:11,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:38596 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:33191:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38596 dst: /127.0.0.1:33191 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:11,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-12T05:43:11,990 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:11,993 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5aafd508b3ac6343fdb30d332a0ff5fb, NAME => 'TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec 2024-12-12T05:43:11,999 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:11,999 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-12T05:43:12,001 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:41998 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:43639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41998 dst: /127.0.0.1:43639 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:12,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-12T05:43:12,006 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:12,006 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:12,007 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 5aafd508b3ac6343fdb30d332a0ff5fb, disabling compactions & flushes 2024-12-12T05:43:12,007 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 2024-12-12T05:43:12,007 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 2024-12-12T05:43:12,007 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. after waiting 0 ms 2024-12-12T05:43:12,007 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 2024-12-12T05:43:12,007 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 
2024-12-12T05:43:12,007 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5aafd508b3ac6343fdb30d332a0ff5fb: Waiting for close lock at 1733982192007Disabling compacts and flushes for region at 1733982192007Disabling writes for close at 1733982192007Writing region close event to WAL at 1733982192007Closed at 1733982192007 2024-12-12T05:43:12,009 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T05:43:12,013 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733982192009"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733982192009"}]},"ts":"1733982192009"} 2024-12-12T05:43:12,017 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-12T05:43:12,019 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T05:43:12,021 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982192019"}]},"ts":"1733982192019"} 2024-12-12T05:43:12,026 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-12T05:43:12,026 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ffbfd3107920=0} racks are {/default-rack=0} 2024-12-12T05:43:12,027 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T05:43:12,027 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T05:43:12,027 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T05:43:12,027 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T05:43:12,027 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T05:43:12,027 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T05:43:12,027 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T05:43:12,027 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T05:43:12,027 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T05:43:12,027 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T05:43:12,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5aafd508b3ac6343fdb30d332a0ff5fb, ASSIGN}] 2024-12-12T05:43:12,031 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5aafd508b3ac6343fdb30d332a0ff5fb, ASSIGN 2024-12-12T05:43:12,033 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5aafd508b3ac6343fdb30d332a0ff5fb, ASSIGN; state=OFFLINE, location=ffbfd3107920,39003,1733982189622; forceNewPlan=false, retain=false 2024-12-12T05:43:12,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T05:43:12,186 INFO [ffbfd3107920:33367 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-12T05:43:12,187 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5aafd508b3ac6343fdb30d332a0ff5fb, regionState=OPENING, regionLocation=ffbfd3107920,39003,1733982189622 2024-12-12T05:43:12,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5aafd508b3ac6343fdb30d332a0ff5fb, ASSIGN because future has completed 2024-12-12T05:43:12,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5aafd508b3ac6343fdb30d332a0ff5fb, server=ffbfd3107920,39003,1733982189622}] 2024-12-12T05:43:12,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T05:43:12,349 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T05:43:12,353 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37481, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T05:43:12,362 INFO [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 
2024-12-12T05:43:12,362 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5aafd508b3ac6343fdb30d332a0ff5fb, NAME => 'TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:43:12,362 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,363 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:12,363 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,363 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,365 INFO [StoreOpener-5aafd508b3ac6343fdb30d332a0ff5fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,367 INFO [StoreOpener-5aafd508b3ac6343fdb30d332a0ff5fb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5aafd508b3ac6343fdb30d332a0ff5fb columnFamilyName cf 2024-12-12T05:43:12,368 DEBUG [StoreOpener-5aafd508b3ac6343fdb30d332a0ff5fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:12,369 INFO [StoreOpener-5aafd508b3ac6343fdb30d332a0ff5fb-1 {}] regionserver.HStore(327): Store=5aafd508b3ac6343fdb30d332a0ff5fb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:43:12,369 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,371 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/default/TestHBaseWalOnEC/5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,371 DEBUG 
[RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/default/TestHBaseWalOnEC/5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,372 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,372 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,375 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,380 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/default/TestHBaseWalOnEC/5aafd508b3ac6343fdb30d332a0ff5fb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:43:12,381 INFO [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5aafd508b3ac6343fdb30d332a0ff5fb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75487006, jitterRate=0.12484404444694519}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T05:43:12,381 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:12,382 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5aafd508b3ac6343fdb30d332a0ff5fb: Running coprocessor pre-open hook at 1733982192363Writing region info on filesystem at 1733982192363Initializing all the Stores at 1733982192365 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982192365Cleaning up temporary data from old regions at 1733982192372 (+7 ms)Running coprocessor post-open hooks at 1733982192381 (+9 ms)Region opened successfully at 1733982192382 (+1 ms) 2024-12-12T05:43:12,383 INFO [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb., pid=6, masterSystemTime=1733982192349 2024-12-12T05:43:12,386 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 2024-12-12T05:43:12,387 INFO [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 
2024-12-12T05:43:12,388 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5aafd508b3ac6343fdb30d332a0ff5fb, regionState=OPEN, openSeqNum=2, regionLocation=ffbfd3107920,39003,1733982189622 2024-12-12T05:43:12,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5aafd508b3ac6343fdb30d332a0ff5fb, server=ffbfd3107920,39003,1733982189622 because future has completed 2024-12-12T05:43:12,397 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-12T05:43:12,397 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5aafd508b3ac6343fdb30d332a0ff5fb, server=ffbfd3107920,39003,1733982189622 in 200 msec 2024-12-12T05:43:12,401 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-12T05:43:12,401 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5aafd508b3ac6343fdb30d332a0ff5fb, ASSIGN in 368 msec 2024-12-12T05:43:12,402 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T05:43:12,402 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982192402"}]},"ts":"1733982192402"} 2024-12-12T05:43:12,408 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-12T05:43:12,410 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T05:43:12,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 446 msec 2024-12-12T05:43:12,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T05:43:12,614 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-12T05:43:12,614 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-12T05:43:12,617 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T05:43:12,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-12T05:43:12,624 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T05:43:12,625 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
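At this point the CreateTableProcedure (pid=4) has finished and the single region of TestHBaseWalOnEC is assigned. A table with the same shape as the descriptor logged earlier (one 'cf' family, VERSIONS => '1') could be created from a client roughly as sketched below; this is an illustrative use of the public HBase Admin API under an assumed client configuration, not the test's actual code.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateTestTable {
    public static void main(String[] args) throws Exception {
      // ZooKeeper quorum etc. assumed to be present on the classpath configuration.
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)          // matches VERSIONS => '1' in the descriptor above
                .build())
            .build();
        admin.createTable(td);              // the master runs a CreateTableProcedure, as with pid=4 here
      }
    }
  }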
2024-12-12T05:43:12,632 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb., hostname=ffbfd3107920,39003,1733982189622, seqNum=2] 2024-12-12T05:43:12,633 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:43:12,635 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49756, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:43:12,643 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-12T05:43:12,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-12T05:43:12,649 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:43:12,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T05:43:12,651 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:43:12,652 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:43:12,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T05:43:12,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39003 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-12T05:43:12,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 
2024-12-12T05:43:12,822 INFO [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5aafd508b3ac6343fdb30d332a0ff5fb 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-12T05:43:12,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/default/TestHBaseWalOnEC/5aafd508b3ac6343fdb30d332a0ff5fb/.tmp/cf/c1d28bf8e9d34a589774e3f9559c88c7 is 36, key is row/cf:cq/1733982192636/Put/seqid=0 2024-12-12T05:43:12,877 WARN [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:12,877 WARN [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:12,881 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2086714377_22 at /127.0.0.1:42022 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:43639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42022 dst: /127.0.0.1:43639 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:12,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-12T05:43:12,886 WARN [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-12T05:43:12,886 INFO [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/default/TestHBaseWalOnEC/5aafd508b3ac6343fdb30d332a0ff5fb/.tmp/cf/c1d28bf8e9d34a589774e3f9559c88c7 2024-12-12T05:43:12,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/default/TestHBaseWalOnEC/5aafd508b3ac6343fdb30d332a0ff5fb/.tmp/cf/c1d28bf8e9d34a589774e3f9559c88c7 as hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/default/TestHBaseWalOnEC/5aafd508b3ac6343fdb30d332a0ff5fb/cf/c1d28bf8e9d34a589774e3f9559c88c7 2024-12-12T05:43:12,937 INFO [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/default/TestHBaseWalOnEC/5aafd508b3ac6343fdb30d332a0ff5fb/cf/c1d28bf8e9d34a589774e3f9559c88c7, entries=1, sequenceid=5, filesize=4.7 K 2024-12-12T05:43:12,943 INFO [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 5aafd508b3ac6343fdb30d332a0ff5fb in 121ms, sequenceid=5, compaction requested=false 2024-12-12T05:43:12,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-12T05:43:12,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 5aafd508b3ac6343fdb30d332a0ff5fb: 2024-12-12T05:43:12,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 
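The flush sequence above (FLUSH_TABLE_PREPARE, then FLUSH_TABLE_FLUSH_REGIONS, then a FlushRegionCallable on the region server writing ~32 B of memstore to an HFile under .tmp/cf and committing it into cf/) is what a client-side Admin.flush call drives. A minimal sketch, assuming a reachable cluster and the table from this log:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class PutAndFlush {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      TableName tn = TableName.valueOf("TestHBaseWalOnEC");
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        try (Table table = conn.getTable(tn)) {
          // One small cell (row/cf:cq), comparable to the ~32 B memstore flushed above.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
        }
        try (Admin admin = conn.getAdmin()) {
          admin.flush(tn);   // drives FlushTableProcedure -> FlushRegionProcedure, as with pid=7/8 here
        }
      }
    }
  }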
2024-12-12T05:43:12,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-12T05:43:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-12T05:43:12,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-12T05:43:12,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 299 msec 2024-12-12T05:43:12,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 313 msec 2024-12-12T05:43:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33367 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T05:43:12,963 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-12T05:43:12,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-12T05:43:12,977 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-12T05:43:12,977 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:12,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:12,982 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:12,982 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T05:43:12,982 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-12T05:43:12,982 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1544060388, stopped=false 2024-12-12T05:43:12,982 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ffbfd3107920,33367,1733982188783 2024-12-12T05:43:13,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:13,031 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:13,031 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:13,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:13,031 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:13,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, 
quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:13,031 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:13,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:13,032 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-12T05:43:13,033 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:13,033 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-12T05:43:13,033 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:13,033 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:13,033 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:13,034 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:13,034 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(878): Closing user regions 2024-12-12T05:43:13,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:13,035 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ffbfd3107920,45027,1733982189466' ***** 2024-12-12T05:43:13,035 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-12T05:43:13,035 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ffbfd3107920,46565,1733982189580' ***** 2024-12-12T05:43:13,035 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-12T05:43:13,035 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ffbfd3107920,39003,1733982189622' ***** 2024-12-12T05:43:13,036 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-12T05:43:13,036 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-12T05:43:13,036 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-12T05:43:13,036 INFO [RS:1;ffbfd3107920:46565 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T05:43:13,036 INFO [RS:2;ffbfd3107920:39003 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-12T05:43:13,036 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-12T05:43:13,036 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-12T05:43:13,037 INFO [RS:2;ffbfd3107920:39003 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T05:43:13,037 INFO [RS:1;ffbfd3107920:46565 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T05:43:13,037 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(959): stopping server ffbfd3107920,46565,1733982189580 2024-12-12T05:43:13,037 INFO [RS:1;ffbfd3107920:46565 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T05:43:13,037 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(3091): Received CLOSE for 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:13,037 INFO [RS:1;ffbfd3107920:46565 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ffbfd3107920:46565. 2024-12-12T05:43:13,037 DEBUG [RS:1;ffbfd3107920:46565 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:13,037 DEBUG [RS:1;ffbfd3107920:46565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:13,037 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(976): stopping server ffbfd3107920,46565,1733982189580; all regions closed. 2024-12-12T05:43:13,038 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(959): stopping server ffbfd3107920,39003,1733982189622 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-12T05:43:13,038 INFO [RS:2;ffbfd3107920:39003 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-12T05:43:13,038 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-12T05:43:13,038 INFO [RS:2;ffbfd3107920:39003 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;ffbfd3107920:39003. 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(959): stopping server ffbfd3107920,45027,1733982189466 2024-12-12T05:43:13,038 DEBUG [RS:2;ffbfd3107920:39003 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T05:43:13,038 DEBUG [RS:2;ffbfd3107920:39003 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ffbfd3107920:45027. 
2024-12-12T05:43:13,038 DEBUG [RS:0;ffbfd3107920:45027 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:13,038 DEBUG [RS:0;ffbfd3107920:45027 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:13,038 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-12T05:43:13,038 DEBUG [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(1325): Online Regions={5aafd508b3ac6343fdb30d332a0ff5fb=TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb.} 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T05:43:13,038 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-12T05:43:13,038 DEBUG [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(1351): Waiting on 5aafd508b3ac6343fdb30d332a0ff5fb 2024-12-12T05:43:13,039 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-12T05:43:13,039 DEBUG [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-12T05:43:13,039 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5aafd508b3ac6343fdb30d332a0ff5fb, disabling compactions & flushes 2024-12-12T05:43:13,039 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-12T05:43:13,039 DEBUG [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-12T05:43:13,039 INFO [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 
2024-12-12T05:43:13,039 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-12T05:43:13,039 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 2024-12-12T05:43:13,039 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-12T05:43:13,039 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. after waiting 0 ms 2024-12-12T05:43:13,039 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T05:43:13,039 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 2024-12-12T05:43:13,039 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T05:43:13,039 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-12T05:43:13,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_1073741827_1017 (size=93) 2024-12-12T05:43:13,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_1073741827_1017 (size=93) 2024-12-12T05:43:13,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_1073741827_1017 (size=93) 2024-12-12T05:43:13,049 DEBUG [RS:1;ffbfd3107920:46565 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/oldWALs 2024-12-12T05:43:13,049 INFO [RS:1;ffbfd3107920:46565 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ffbfd3107920%2C46565%2C1733982189580:(num 1733982191096) 2024-12-12T05:43:13,049 DEBUG [RS:1;ffbfd3107920:46565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:13,050 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:13,050 INFO [RS:1;ffbfd3107920:46565 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T05:43:13,050 INFO [RS:1;ffbfd3107920:46565 {}] hbase.ChoreService(370): Chore service for: regionserver/ffbfd3107920:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-12T05:43:13,050 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T05:43:13,050 INFO [regionserver/ffbfd3107920:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-12T05:43:13,050 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T05:43:13,050 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T05:43:13,050 INFO [RS:1;ffbfd3107920:46565 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T05:43:13,051 INFO [RS:1;ffbfd3107920:46565 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46565 2024-12-12T05:43:13,062 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/default/TestHBaseWalOnEC/5aafd508b3ac6343fdb30d332a0ff5fb/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T05:43:13,064 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ffbfd3107920,46565,1733982189580 2024-12-12T05:43:13,064 INFO [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 2024-12-12T05:43:13,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T05:43:13,064 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5aafd508b3ac6343fdb30d332a0ff5fb: Waiting for close lock at 1733982193039Running coprocessor pre-close hooks at 1733982193039Disabling compacts and flushes for region at 1733982193039Disabling writes for close at 1733982193039Writing region close event to WAL at 1733982193049 (+10 ms)Running coprocessor post-close hooks at 1733982193063 (+14 ms)Closed at 1733982193064 (+1 ms) 2024-12-12T05:43:13,064 INFO [RS:1;ffbfd3107920:46565 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T05:43:13,065 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb. 2024-12-12T05:43:13,065 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ffbfd3107920,46565,1733982189580] 2024-12-12T05:43:13,071 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/.tmp/info/9fce399e88c848bbae5dbd996688d1ee is 153, key is TestHBaseWalOnEC,,1733982191959.5aafd508b3ac6343fdb30d332a0ff5fb./info:regioninfo/1733982192388/Put/seqid=0 2024-12-12T05:43:13,074 WARN [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-12T05:43:13,074 WARN [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1531298029_22 at /127.0.0.1:42054 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:43639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42054 dst: /127.0.0.1:43639 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:13,080 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ffbfd3107920,46565,1733982189580 already deleted, retry=false 2024-12-12T05:43:13,081 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ffbfd3107920,46565,1733982189580 expired; onlineServers=2 2024-12-12T05:43:13,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-12T05:43:13,082 WARN [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-12T05:43:13,082 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/.tmp/info/9fce399e88c848bbae5dbd996688d1ee 2024-12-12T05:43:13,106 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/.tmp/ns/ae865880586141af9b2ee6a5981a715e is 43, key is default/ns:d/1733982191721/Put/seqid=0 2024-12-12T05:43:13,109 WARN [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,109 WARN [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,113 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1531298029_22 at /127.0.0.1:42064 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:43639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42064 dst: /127.0.0.1:43639 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:13,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-12T05:43:13,118 WARN [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-12T05:43:13,118 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/.tmp/ns/ae865880586141af9b2ee6a5981a715e 2024-12-12T05:43:13,133 INFO [regionserver/ffbfd3107920:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:13,135 INFO [regionserver/ffbfd3107920:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:13,135 INFO [regionserver/ffbfd3107920:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:13,143 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/.tmp/table/89141ff98038490d9b590b4cdb44a6e9 is 52, key is TestHBaseWalOnEC/table:state/1733982192402/Put/seqid=0 2024-12-12T05:43:13,145 WARN [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,145 WARN [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1531298029_22 at /127.0.0.1:42082 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:43639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42082 dst: /127.0.0.1:43639 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:43:13,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-12T05:43:13,152 WARN [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:13,152 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/.tmp/table/89141ff98038490d9b590b4cdb44a6e9 2024-12-12T05:43:13,162 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/.tmp/info/9fce399e88c848bbae5dbd996688d1ee as hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/info/9fce399e88c848bbae5dbd996688d1ee 2024-12-12T05:43:13,171 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/info/9fce399e88c848bbae5dbd996688d1ee, entries=10, sequenceid=11, filesize=6.5 K 2024-12-12T05:43:13,173 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:13,173 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46565-0x100189a30850002, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:13,173 INFO [RS:1;ffbfd3107920:46565 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T05:43:13,173 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/.tmp/ns/ae865880586141af9b2ee6a5981a715e as hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/ns/ae865880586141af9b2ee6a5981a715e 2024-12-12T05:43:13,173 INFO [RS:1;ffbfd3107920:46565 {}] regionserver.HRegionServer(1031): Exiting; stopping=ffbfd3107920,46565,1733982189580; zookeeper connection closed. 
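
[Editor's note] Each flush above writes a store's memstore to a temporary HFile under .tmp and then commits it into the column family directory (here info/9fce399e88c848bbae5dbd996688d1ee, 10 entries at sequenceid 11, 6.5 K). Such a flushed HFile can be inspected offline with HBase's HFilePrettyPrinter tool. A minimal sketch, using the committed path from the log and assuming the tool's usual -m (print meta) and -p (print cells) options are available in this HBase version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;
import org.apache.hadoop.util.ToolRunner;

public class InspectFlushedHFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Store file committed in the log above; substitute any other flushed HFile path.
    String hfile = "hdfs://localhost:33553/user/jenkins/test-data/"
        + "521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/info/"
        + "9fce399e88c848bbae5dbd996688d1ee";
    // -m prints the file metadata (entry count, compression, bloom filter type),
    // -p prints each cell; ToolRunner passes the configuration to the tool.
    int rc = ToolRunner.run(conf, new HFilePrettyPrinter(), new String[] { "-m", "-p", "-f", hfile });
    System.exit(rc);
  }
}
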
2024-12-12T05:43:13,174 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1bb10949 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1bb10949 2024-12-12T05:43:13,182 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/ns/ae865880586141af9b2ee6a5981a715e, entries=2, sequenceid=11, filesize=5.0 K 2024-12-12T05:43:13,183 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/.tmp/table/89141ff98038490d9b590b4cdb44a6e9 as hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/table/89141ff98038490d9b590b4cdb44a6e9 2024-12-12T05:43:13,192 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/table/89141ff98038490d9b590b4cdb44a6e9, entries=2, sequenceid=11, filesize=5.1 K 2024-12-12T05:43:13,193 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false 2024-12-12T05:43:13,193 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-12T05:43:13,201 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-12T05:43:13,202 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T05:43:13,202 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-12T05:43:13,203 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733982193039Running coprocessor pre-close hooks at 1733982193039Disabling compacts and flushes for region at 1733982193039Disabling writes for close at 1733982193039Obtaining lock to block concurrent updates at 1733982193039Preparing flush snapshotting stores in 1588230740 at 1733982193039Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733982193040 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733982193041 (+1 ms)Flushing 1588230740/info: creating writer at 1733982193041Flushing 1588230740/info: appending metadata at 1733982193068 (+27 ms)Flushing 1588230740/info: closing flushed file at 1733982193068Flushing 1588230740/ns: creating writer at 1733982193091 (+23 
ms)Flushing 1588230740/ns: appending metadata at 1733982193105 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733982193106 (+1 ms)Flushing 1588230740/table: creating writer at 1733982193127 (+21 ms)Flushing 1588230740/table: appending metadata at 1733982193142 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733982193142Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70fb3321: reopening flushed file at 1733982193161 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d76c57c: reopening flushed file at 1733982193172 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5150e2d9: reopening flushed file at 1733982193182 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false at 1733982193193 (+11 ms)Writing region close event to WAL at 1733982193195 (+2 ms)Running coprocessor post-close hooks at 1733982193202 (+7 ms)Closed at 1733982193202 2024-12-12T05:43:13,203 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-12T05:43:13,239 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(976): stopping server ffbfd3107920,39003,1733982189622; all regions closed. 2024-12-12T05:43:13,239 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(976): stopping server ffbfd3107920,45027,1733982189466; all regions closed. 2024-12-12T05:43:13,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_1073741826_1016 (size=1298) 2024-12-12T05:43:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_1073741826_1016 (size=1298) 2024-12-12T05:43:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_1073741829_1019 (size=2751) 2024-12-12T05:43:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_1073741826_1016 (size=1298) 2024-12-12T05:43:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_1073741829_1019 (size=2751) 2024-12-12T05:43:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_1073741829_1019 (size=2751) 2024-12-12T05:43:13,249 DEBUG [RS:2;ffbfd3107920:39003 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/oldWALs 2024-12-12T05:43:13,249 INFO [RS:2;ffbfd3107920:39003 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ffbfd3107920%2C39003%2C1733982189622:(num 1733982191096) 2024-12-12T05:43:13,249 DEBUG [RS:2;ffbfd3107920:39003 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:13,249 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:13,249 DEBUG [RS:0;ffbfd3107920:45027 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/oldWALs 2024-12-12T05:43:13,249 INFO [RS:0;ffbfd3107920:45027 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ffbfd3107920%2C45027%2C1733982189466.meta:.meta(num 1733982191551) 2024-12-12T05:43:13,249 INFO [RS:2;ffbfd3107920:39003 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T05:43:13,249 INFO [RS:2;ffbfd3107920:39003 {}] hbase.ChoreService(370): Chore service for: regionserver/ffbfd3107920:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-12T05:43:13,250 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T05:43:13,250 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T05:43:13,250 INFO [regionserver/ffbfd3107920:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-12T05:43:13,250 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T05:43:13,250 INFO [RS:2;ffbfd3107920:39003 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T05:43:13,250 INFO [RS:2;ffbfd3107920:39003 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39003 2024-12-12T05:43:13,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_1073741828_1018 (size=93) 2024-12-12T05:43:13,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_1073741828_1018 (size=93) 2024-12-12T05:43:13,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_1073741828_1018 (size=93) 2024-12-12T05:43:13,256 DEBUG [RS:0;ffbfd3107920:45027 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/oldWALs 2024-12-12T05:43:13,256 INFO [RS:0;ffbfd3107920:45027 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ffbfd3107920%2C45027%2C1733982189466:(num 1733982191096) 2024-12-12T05:43:13,256 DEBUG [RS:0;ffbfd3107920:45027 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:13,256 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:13,256 INFO [RS:0;ffbfd3107920:45027 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T05:43:13,256 INFO [RS:0;ffbfd3107920:45027 {}] hbase.ChoreService(370): Chore service for: regionserver/ffbfd3107920:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-12T05:43:13,257 INFO [RS:0;ffbfd3107920:45027 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T05:43:13,257 INFO [regionserver/ffbfd3107920:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-12T05:43:13,257 INFO [RS:0;ffbfd3107920:45027 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45027 2024-12-12T05:43:13,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ffbfd3107920,39003,1733982189622 2024-12-12T05:43:13,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T05:43:13,273 INFO [RS:2;ffbfd3107920:39003 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T05:43:13,273 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$371/0x00007fd97c8f5858@6d253e66 rejected from java.util.concurrent.ThreadPoolExecutor@2827bda6[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-12T05:43:13,281 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ffbfd3107920,45027,1733982189466 2024-12-12T05:43:13,281 INFO [RS:0;ffbfd3107920:45027 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T05:43:13,289 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ffbfd3107920,39003,1733982189622] 2024-12-12T05:43:13,306 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ffbfd3107920,39003,1733982189622 already deleted, retry=false 2024-12-12T05:43:13,306 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ffbfd3107920,39003,1733982189622 expired; onlineServers=1 2024-12-12T05:43:13,306 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ffbfd3107920,45027,1733982189466] 2024-12-12T05:43:13,314 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ffbfd3107920,45027,1733982189466 already deleted, retry=false 2024-12-12T05:43:13,314 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ffbfd3107920,45027,1733982189466 expired; onlineServers=0 2024-12-12T05:43:13,314 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ffbfd3107920,33367,1733982188783' ***** 2024-12-12T05:43:13,315 INFO [RegionServerTracker-0 {}] 
master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-12T05:43:13,315 INFO [M:0;ffbfd3107920:33367 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T05:43:13,315 INFO [M:0;ffbfd3107920:33367 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T05:43:13,315 DEBUG [M:0;ffbfd3107920:33367 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-12T05:43:13,316 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-12T05:43:13,316 DEBUG [M:0;ffbfd3107920:33367 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-12T05:43:13,316 DEBUG [master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.small.0-1733982190756 {}] cleaner.HFileCleaner(306): Exit Thread[master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.small.0-1733982190756,5,FailOnTimeoutGroup] 2024-12-12T05:43:13,316 DEBUG [master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.large.0-1733982190749 {}] cleaner.HFileCleaner(306): Exit Thread[master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.large.0-1733982190749,5,FailOnTimeoutGroup] 2024-12-12T05:43:13,317 INFO [M:0;ffbfd3107920:33367 {}] hbase.ChoreService(370): Chore service for: master/ffbfd3107920:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-12T05:43:13,317 INFO [M:0;ffbfd3107920:33367 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T05:43:13,317 DEBUG [M:0;ffbfd3107920:33367 {}] master.HMaster(1795): Stopping service threads 2024-12-12T05:43:13,317 INFO [M:0;ffbfd3107920:33367 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-12T05:43:13,318 INFO [M:0;ffbfd3107920:33367 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-12T05:43:13,319 INFO [M:0;ffbfd3107920:33367 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-12T05:43:13,319 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-12T05:43:13,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-12T05:43:13,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:13,322 DEBUG [M:0;ffbfd3107920:33367 {}] zookeeper.ZKUtil(347): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-12T05:43:13,322 WARN [M:0;ffbfd3107920:33367 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-12T05:43:13,323 INFO [M:0;ffbfd3107920:33367 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/.lastflushedseqids 2024-12-12T05:43:13,334 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,334 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,336 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:38614 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:33191:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38614 dst: /127.0.0.1:33191 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:13,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-12T05:43:13,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:13,389 INFO [RS:2;ffbfd3107920:39003 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T05:43:13,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39003-0x100189a30850003, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:13,390 INFO [RS:2;ffbfd3107920:39003 {}] regionserver.HRegionServer(1031): Exiting; stopping=ffbfd3107920,39003,1733982189622; zookeeper connection closed. 
2024-12-12T05:43:13,390 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6702f9f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6702f9f 2024-12-12T05:43:13,398 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:13,398 INFO [RS:0;ffbfd3107920:45027 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T05:43:13,398 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45027-0x100189a30850001, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:13,398 INFO [RS:0;ffbfd3107920:45027 {}] regionserver.HRegionServer(1031): Exiting; stopping=ffbfd3107920,45027,1733982189466; zookeeper connection closed. 2024-12-12T05:43:13,398 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@77c5f5c0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@77c5f5c0 2024-12-12T05:43:13,400 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-12T05:43:13,743 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:13,743 INFO [M:0;ffbfd3107920:33367 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-12T05:43:13,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-12T05:43:13,744 INFO [M:0;ffbfd3107920:33367 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-12T05:43:13,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-12T05:43:13,744 DEBUG [M:0;ffbfd3107920:33367 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T05:43:13,744 INFO [M:0;ffbfd3107920:33367 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:13,744 DEBUG [M:0;ffbfd3107920:33367 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:13,744 DEBUG [M:0;ffbfd3107920:33367 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T05:43:13,744 DEBUG [M:0;ffbfd3107920:33367 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-12T05:43:13,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-12T05:43:13,744 INFO [M:0;ffbfd3107920:33367 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-12T05:43:13,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-12T05:43:13,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-12T05:43:13,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-12T05:43:13,762 DEBUG [M:0;ffbfd3107920:33367 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2cfe4d552334473a8437465489f6065c is 82, key is hbase:meta,,1/info:regioninfo/1733982191628/Put/seqid=0 2024-12-12T05:43:13,764 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,764 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-12T05:43:13,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-12T05:43:13,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-12T05:43:13,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-12T05:43:13,771 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:58268 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:38943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58268 dst: /127.0.0.1:38943 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:13,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-12T05:43:13,775 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:13,776 INFO [M:0;ffbfd3107920:33367 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2cfe4d552334473a8437465489f6065c 2024-12-12T05:43:13,802 DEBUG [M:0;ffbfd3107920:33367 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b43dd027216046f4b5e5cf28584c5d31 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733982192413/Put/seqid=0 2024-12-12T05:43:13,804 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,805 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,807 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:42146 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:43639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42146 dst: /127.0.0.1:43639 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:13,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_-9223372036854775552_1037 (size=6438) 2024-12-12T05:43:13,812 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:13,812 INFO [M:0;ffbfd3107920:33367 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b43dd027216046f4b5e5cf28584c5d31 2024-12-12T05:43:13,835 DEBUG [M:0;ffbfd3107920:33367 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/94aa0bb309174d72865de66643c59833 is 69, key is ffbfd3107920,39003,1733982189622/rs:state/1733982190803/Put/seqid=0 2024-12-12T05:43:13,837 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,837 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-12T05:43:13,840 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1324261263_22 at /127.0.0.1:38654 [Receiving block BP-1389929639-172.17.0.2-1733982184928:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33191:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38654 dst: /127.0.0.1:33191 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:43:13,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-12T05:43:14,249 WARN [M:0;ffbfd3107920:33367 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-12T05:43:14,250 INFO [M:0;ffbfd3107920:33367 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/94aa0bb309174d72865de66643c59833 2024-12-12T05:43:14,263 DEBUG [M:0;ffbfd3107920:33367 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2cfe4d552334473a8437465489f6065c as hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2cfe4d552334473a8437465489f6065c 2024-12-12T05:43:14,271 INFO [M:0;ffbfd3107920:33367 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2cfe4d552334473a8437465489f6065c, entries=8, sequenceid=72, filesize=5.5 K 2024-12-12T05:43:14,272 DEBUG [M:0;ffbfd3107920:33367 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b43dd027216046f4b5e5cf28584c5d31 as hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b43dd027216046f4b5e5cf28584c5d31 2024-12-12T05:43:14,280 INFO [M:0;ffbfd3107920:33367 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b43dd027216046f4b5e5cf28584c5d31, entries=8, sequenceid=72, filesize=6.3 K 2024-12-12T05:43:14,281 DEBUG [M:0;ffbfd3107920:33367 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/94aa0bb309174d72865de66643c59833 as 
hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/94aa0bb309174d72865de66643c59833 2024-12-12T05:43:14,288 INFO [M:0;ffbfd3107920:33367 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/94aa0bb309174d72865de66643c59833, entries=3, sequenceid=72, filesize=5.2 K 2024-12-12T05:43:14,290 INFO [M:0;ffbfd3107920:33367 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 545ms, sequenceid=72, compaction requested=false 2024-12-12T05:43:14,291 INFO [M:0;ffbfd3107920:33367 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:14,291 DEBUG [M:0;ffbfd3107920:33367 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733982193744Disabling compacts and flushes for region at 1733982193744Disabling writes for close at 1733982193744Obtaining lock to block concurrent updates at 1733982193744Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733982193744Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733982193745 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733982193746 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733982193746Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733982193761 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733982193761Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733982193783 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733982193802 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733982193802Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733982193820 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733982193835 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733982193835Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@755f7673: reopening flushed file at 1733982194262 (+427 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@221b72a4: reopening flushed file at 1733982194271 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c2a9e59: reopening flushed file at 1733982194280 (+9 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 545ms, sequenceid=72, compaction requested=false at 1733982194290 (+10 ms)Writing region close event to WAL at 1733982194291 (+1 ms)Closed at 1733982194291 2024-12-12T05:43:14,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43639 is added to blk_1073741825_1011 (size=32662) 2024-12-12T05:43:14,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38943 is added to blk_1073741825_1011 (size=32662) 2024-12-12T05:43:14,294 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33191 is added to blk_1073741825_1011 (size=32662) 2024-12-12T05:43:14,295 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-12T05:43:14,295 INFO [M:0;ffbfd3107920:33367 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-12T05:43:14,295 INFO [M:0;ffbfd3107920:33367 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33367 2024-12-12T05:43:14,296 INFO [M:0;ffbfd3107920:33367 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T05:43:14,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:14,423 INFO [M:0;ffbfd3107920:33367 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T05:43:14,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33367-0x100189a30850000, quorum=127.0.0.1:51134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:14,461 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3297a183{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T05:43:14,464 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b8a83a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T05:43:14,464 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T05:43:14,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17c0da3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T05:43:14,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@656f7043{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir/,STOPPED} 2024-12-12T05:43:14,468 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-12T05:43:14,468 WARN [BP-1389929639-172.17.0.2-1733982184928 heartbeating to localhost/127.0.0.1:33553 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T05:43:14,468 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T05:43:14,468 WARN [BP-1389929639-172.17.0.2-1733982184928 heartbeating to localhost/127.0.0.1:33553 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1389929639-172.17.0.2-1733982184928 (Datanode Uuid b556d465-4299-4bd3-b90e-6bc00d999d14) service to localhost/127.0.0.1:33553 2024-12-12T05:43:14,470 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data5/current/BP-1389929639-172.17.0.2-1733982184928 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T05:43:14,470 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data6/current/BP-1389929639-172.17.0.2-1733982184928 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T05:43:14,471 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T05:43:14,475 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1548acd1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T05:43:14,475 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3dc20694{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T05:43:14,475 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T05:43:14,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fb4f3a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T05:43:14,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37e44dc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir/,STOPPED} 2024-12-12T05:43:14,477 WARN [BP-1389929639-172.17.0.2-1733982184928 heartbeating to localhost/127.0.0.1:33553 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T05:43:14,477 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-12T05:43:14,477 WARN [BP-1389929639-172.17.0.2-1733982184928 heartbeating to localhost/127.0.0.1:33553 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1389929639-172.17.0.2-1733982184928 (Datanode Uuid 7969e3e7-c778-41c1-b5c4-b84e08e6e234) service to localhost/127.0.0.1:33553 2024-12-12T05:43:14,477 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T05:43:14,478 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data3/current/BP-1389929639-172.17.0.2-1733982184928 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T05:43:14,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data4/current/BP-1389929639-172.17.0.2-1733982184928 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T05:43:14,479 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T05:43:14,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d1a7cf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T05:43:14,482 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20b70ca3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T05:43:14,482 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T05:43:14,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54f91ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T05:43:14,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d0819de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir/,STOPPED} 2024-12-12T05:43:14,484 WARN [BP-1389929639-172.17.0.2-1733982184928 heartbeating to localhost/127.0.0.1:33553 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T05:43:14,484 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-12T05:43:14,484 WARN [BP-1389929639-172.17.0.2-1733982184928 heartbeating to localhost/127.0.0.1:33553 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1389929639-172.17.0.2-1733982184928 (Datanode Uuid c227c6b4-17df-4e8f-8bfd-f6cbf1de9c03) service to localhost/127.0.0.1:33553 2024-12-12T05:43:14,484 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T05:43:14,485 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data1/current/BP-1389929639-172.17.0.2-1733982184928 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T05:43:14,485 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/cluster_09644293-8364-632e-ad08-3762a019c6e8/data/data2/current/BP-1389929639-172.17.0.2-1733982184928 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T05:43:14,485 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T05:43:14,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ea2dca6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-12T05:43:14,493 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ed9b238{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T05:43:14,493 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T05:43:14,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@410292bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T05:43:14,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27f57d6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir/,STOPPED} 2024-12-12T05:43:14,501 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-12T05:43:14,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-12T05:43:14,532 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=87 (was 157), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=298 (was 298), ProcessCount=11 (was 11), AvailableMemoryMB=9523 (was 9817) 2024-12-12T05:43:14,537 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=87, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=298, ProcessCount=11, AvailableMemoryMB=9523 2024-12-12T05:43:14,537 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.log.dir so I do NOT create it in target/test-data/497967e3-51c3-4554-eda4-de74555bee5e 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cd6aebff-53a3-9ca7-5d97-8b857a55bad7/hadoop.tmp.dir so I do NOT create it in target/test-data/497967e3-51c3-4554-eda4-de74555bee5e 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0, deleteOnExit=true 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/test.cache.data in system properties and HBase conf 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.tmp.dir in system properties and HBase conf 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.log.dir in system properties and HBase conf 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-12T05:43:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-12T05:43:14,538 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-12T05:43:14,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/nfs.dump.dir in system properties and HBase conf 2024-12-12T05:43:14,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/java.io.tmpdir in system properties and HBase conf 2024-12-12T05:43:14,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T05:43:14,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-12T05:43:14,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-12T05:43:14,818 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T05:43:14,822 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T05:43:14,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T05:43:14,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T05:43:14,823 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-12T05:43:14,824 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T05:43:14,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bc7756d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.log.dir/,AVAILABLE} 2024-12-12T05:43:14,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7427c398{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T05:43:14,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ffee01{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/java.io.tmpdir/jetty-localhost-34441-hadoop-hdfs-3_4_1-tests_jar-_-any-6126510990616362783/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-12T05:43:14,913 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66a76d08{HTTP/1.1, (http/1.1)}{localhost:34441} 2024-12-12T05:43:14,913 INFO [Time-limited test {}] server.Server(415): Started @11540ms 2024-12-12T05:43:15,117 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T05:43:15,120 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T05:43:15,121 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T05:43:15,121 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T05:43:15,121 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-12T05:43:15,122 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e4b8c76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.log.dir/,AVAILABLE} 2024-12-12T05:43:15,122 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d78823c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T05:43:15,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@90c4d2d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/java.io.tmpdir/jetty-localhost-32987-hadoop-hdfs-3_4_1-tests_jar-_-any-13032101639608575339/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T05:43:15,211 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@71b2c5b9{HTTP/1.1, (http/1.1)}{localhost:32987} 2024-12-12T05:43:15,211 INFO [Time-limited test {}] server.Server(415): Started @11838ms 2024-12-12T05:43:15,212 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-12T05:43:15,242 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T05:43:15,245 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T05:43:15,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T05:43:15,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T05:43:15,246 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T05:43:15,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7579e296{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.log.dir/,AVAILABLE} 2024-12-12T05:43:15,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bedcc8f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T05:43:15,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e4b56fd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/java.io.tmpdir/jetty-localhost-46171-hadoop-hdfs-3_4_1-tests_jar-_-any-17559963775897769900/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T05:43:15,338 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29550204{HTTP/1.1, (http/1.1)}{localhost:46171} 2024-12-12T05:43:15,338 INFO [Time-limited test {}] server.Server(415): Started @11965ms 2024-12-12T05:43:15,339 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-12T05:43:15,364 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T05:43:15,367 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T05:43:15,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T05:43:15,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T05:43:15,368 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T05:43:15,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ad282c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.log.dir/,AVAILABLE} 2024-12-12T05:43:15,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@769ec274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T05:43:15,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22ace0e0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/java.io.tmpdir/jetty-localhost-45563-hadoop-hdfs-3_4_1-tests_jar-_-any-16116640200374918577/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T05:43:15,459 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4246e5dd{HTTP/1.1, (http/1.1)}{localhost:45563} 2024-12-12T05:43:15,459 INFO [Time-limited test {}] server.Server(415): Started @12085ms 2024-12-12T05:43:15,460 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-12T05:43:15,942 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data1/current/BP-1612086000-172.17.0.2-1733982194562/current, will proceed with Du for space computation calculation, 2024-12-12T05:43:15,942 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data2/current/BP-1612086000-172.17.0.2-1733982194562/current, will proceed with Du for space computation calculation, 2024-12-12T05:43:15,957 WARN [Thread-506 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-12T05:43:15,960 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2bbf75cb715f09b9 with lease ID 0xe2bfc64b371285c2: Processing first storage report for DS-3c7f1178-17ba-4b9f-ae37-969aa9162bc6 from datanode DatanodeRegistration(127.0.0.1:36661, datanodeUuid=31287333-2d80-4a57-a80b-c4cfba24c1b0, infoPort=34039, infoSecurePort=0, ipcPort=36645, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562) 2024-12-12T05:43:15,960 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2bbf75cb715f09b9 with lease ID 0xe2bfc64b371285c2: from storage DS-3c7f1178-17ba-4b9f-ae37-969aa9162bc6 node DatanodeRegistration(127.0.0.1:36661, datanodeUuid=31287333-2d80-4a57-a80b-c4cfba24c1b0, infoPort=34039, infoSecurePort=0, ipcPort=36645, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-12T05:43:15,960 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2bbf75cb715f09b9 with lease ID 0xe2bfc64b371285c2: Processing first storage report for DS-0eb523b5-7e01-4f3b-a237-e86b8b8f48b8 from datanode DatanodeRegistration(127.0.0.1:36661, datanodeUuid=31287333-2d80-4a57-a80b-c4cfba24c1b0, infoPort=34039, infoSecurePort=0, ipcPort=36645, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562) 2024-12-12T05:43:15,960 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2bbf75cb715f09b9 with lease ID 0xe2bfc64b371285c2: from storage DS-0eb523b5-7e01-4f3b-a237-e86b8b8f48b8 node DatanodeRegistration(127.0.0.1:36661, datanodeUuid=31287333-2d80-4a57-a80b-c4cfba24c1b0, infoPort=34039, infoSecurePort=0, ipcPort=36645, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-12T05:43:16,290 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data3/current/BP-1612086000-172.17.0.2-1733982194562/current, will proceed with Du for space computation calculation, 2024-12-12T05:43:16,290 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data4/current/BP-1612086000-172.17.0.2-1733982194562/current, will proceed with Du for space computation calculation, 2024-12-12T05:43:16,311 WARN [Thread-529 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-12T05:43:16,314 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2856c5a0163e44ba with lease ID 0xe2bfc64b371285c3: Processing first storage report for DS-0d3b4f12-c1dc-4b64-a0e1-7ebc2a3c3b2d from datanode DatanodeRegistration(127.0.0.1:40655, datanodeUuid=579349ef-5cd0-4bc2-85ee-334752a310c3, infoPort=40015, infoSecurePort=0, ipcPort=46295, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562) 2024-12-12T05:43:16,314 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2856c5a0163e44ba with lease ID 0xe2bfc64b371285c3: from storage DS-0d3b4f12-c1dc-4b64-a0e1-7ebc2a3c3b2d node DatanodeRegistration(127.0.0.1:40655, datanodeUuid=579349ef-5cd0-4bc2-85ee-334752a310c3, infoPort=40015, infoSecurePort=0, ipcPort=46295, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-12T05:43:16,314 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2856c5a0163e44ba with lease ID 0xe2bfc64b371285c3: Processing first storage report for DS-c09b7564-383e-4247-9ffb-80a13b763cb1 from datanode DatanodeRegistration(127.0.0.1:40655, datanodeUuid=579349ef-5cd0-4bc2-85ee-334752a310c3, infoPort=40015, infoSecurePort=0, ipcPort=46295, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562) 2024-12-12T05:43:16,314 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2856c5a0163e44ba with lease ID 0xe2bfc64b371285c3: from storage DS-c09b7564-383e-4247-9ffb-80a13b763cb1 node DatanodeRegistration(127.0.0.1:40655, datanodeUuid=579349ef-5cd0-4bc2-85ee-334752a310c3, infoPort=40015, infoSecurePort=0, ipcPort=46295, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-12T05:43:16,316 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data5/current/BP-1612086000-172.17.0.2-1733982194562/current, will proceed with Du for space computation calculation, 2024-12-12T05:43:16,317 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data6/current/BP-1612086000-172.17.0.2-1733982194562/current, will proceed with Du for space computation calculation, 2024-12-12T05:43:16,331 WARN [Thread-551 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-12T05:43:16,334 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9daebb8292ec0d32 with lease ID 0xe2bfc64b371285c4: Processing first storage report for DS-b2371051-e56b-4022-9e25-3941355a9f43 from datanode DatanodeRegistration(127.0.0.1:44435, datanodeUuid=5f66a038-0882-4551-bf2c-a045b4ecd1bc, infoPort=45337, infoSecurePort=0, ipcPort=38305, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562) 2024-12-12T05:43:16,334 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9daebb8292ec0d32 with lease ID 0xe2bfc64b371285c4: from storage DS-b2371051-e56b-4022-9e25-3941355a9f43 node DatanodeRegistration(127.0.0.1:44435, datanodeUuid=5f66a038-0882-4551-bf2c-a045b4ecd1bc, infoPort=45337, infoSecurePort=0, ipcPort=38305, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-12T05:43:16,334 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9daebb8292ec0d32 with lease ID 0xe2bfc64b371285c4: Processing first storage report for DS-ea50cb9a-86e7-48a0-8414-dad7278b6956 from datanode DatanodeRegistration(127.0.0.1:44435, datanodeUuid=5f66a038-0882-4551-bf2c-a045b4ecd1bc, infoPort=45337, infoSecurePort=0, ipcPort=38305, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562) 2024-12-12T05:43:16,334 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9daebb8292ec0d32 with lease ID 0xe2bfc64b371285c4: from storage DS-ea50cb9a-86e7-48a0-8414-dad7278b6956 node DatanodeRegistration(127.0.0.1:44435, datanodeUuid=5f66a038-0882-4551-bf2c-a045b4ecd1bc, infoPort=45337, infoSecurePort=0, ipcPort=38305, storageInfo=lv=-57;cid=testClusterID;nsid=2142840212;c=1733982194562), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-12T05:43:16,398 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e 2024-12-12T05:43:16,426 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/zookeeper_0, clientPort=59158, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-12T05:43:16,427 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59158 2024-12-12T05:43:16,428 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,430 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741825_1001 (size=7) 2024-12-12T05:43:16,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741825_1001 (size=7) 2024-12-12T05:43:16,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741825_1001 (size=7) 2024-12-12T05:43:16,447 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3 with version=8 2024-12-12T05:43:16,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33553/user/jenkins/test-data/521ef07d-038d-df4c-4b87-ceeddd2470ec/hbase-staging 2024-12-12T05:43:16,449 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ffbfd3107920:0 server-side Connection retries=45 2024-12-12T05:43:16,449 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,449 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,449 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T05:43:16,449 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,449 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T05:43:16,450 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-12T05:43:16,450 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T05:43:16,450 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39985 2024-12-12T05:43:16,451 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39985 connecting to ZooKeeper ensemble=127.0.0.1:59158 2024-12-12T05:43:16,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:399850x0, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T05:43:16,500 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39985-0x100189a51650000 connected 2024-12-12T05:43:16,564 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,567 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,570 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:16,570 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3, hbase.cluster.distributed=false 2024-12-12T05:43:16,574 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T05:43:16,574 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39985 2024-12-12T05:43:16,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39985 2024-12-12T05:43:16,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39985 2024-12-12T05:43:16,576 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39985 2024-12-12T05:43:16,577 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39985 2024-12-12T05:43:16,592 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ffbfd3107920:0 server-side Connection retries=45 2024-12-12T05:43:16,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,592 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T05:43:16,593 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,593 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T05:43:16,593 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T05:43:16,593 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T05:43:16,593 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45695 2024-12-12T05:43:16,594 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45695 connecting to ZooKeeper ensemble=127.0.0.1:59158 2024-12-12T05:43:16,595 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:456950x0, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T05:43:16,606 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45695-0x100189a51650001 connected 2024-12-12T05:43:16,606 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:16,606 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-12T05:43:16,607 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-12T05:43:16,607 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T05:43:16,608 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T05:43:16,609 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45695 2024-12-12T05:43:16,609 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45695 2024-12-12T05:43:16,609 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45695 2024-12-12T05:43:16,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45695 2024-12-12T05:43:16,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45695 2024-12-12T05:43:16,629 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ffbfd3107920:0 server-side Connection retries=45 2024-12-12T05:43:16,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,629 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T05:43:16,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T05:43:16,629 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T05:43:16,630 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T05:43:16,630 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39703 2024-12-12T05:43:16,631 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39703 connecting to ZooKeeper ensemble=127.0.0.1:59158 2024-12-12T05:43:16,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,633 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397030x0, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T05:43:16,647 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39703-0x100189a51650002 connected 2024-12-12T05:43:16,647 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:16,648 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-12T05:43:16,648 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-12T05:43:16,649 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T05:43:16,651 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T05:43:16,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39703 2024-12-12T05:43:16,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39703 2024-12-12T05:43:16,653 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39703 2024-12-12T05:43:16,655 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39703 2024-12-12T05:43:16,656 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39703 2024-12-12T05:43:16,672 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ffbfd3107920:0 server-side Connection retries=45 2024-12-12T05:43:16,672 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,672 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,672 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T05:43:16,672 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:43:16,672 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T05:43:16,672 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T05:43:16,672 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T05:43:16,673 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41391 2024-12-12T05:43:16,674 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41391 connecting to ZooKeeper ensemble=127.0.0.1:59158 2024-12-12T05:43:16,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,676 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413910x0, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T05:43:16,689 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41391-0x100189a51650003 connected 2024-12-12T05:43:16,689 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:16,689 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-12T05:43:16,690 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-12T05:43:16,691 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T05:43:16,692 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T05:43:16,692 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41391 2024-12-12T05:43:16,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41391 2024-12-12T05:43:16,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41391 2024-12-12T05:43:16,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41391 2024-12-12T05:43:16,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41391 2024-12-12T05:43:16,706 DEBUG [M:0;ffbfd3107920:39985 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ffbfd3107920:39985 2024-12-12T05:43:16,707 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ffbfd3107920,39985,1733982196449 2024-12-12T05:43:16,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:16,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:16,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:16,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:16,714 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ffbfd3107920,39985,1733982196449 2024-12-12T05:43:16,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T05:43:16,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T05:43:16,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T05:43:16,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,723 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T05:43:16,723 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ffbfd3107920,39985,1733982196449 from backup master directory 2024-12-12T05:43:16,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ffbfd3107920,39985,1733982196449 2024-12-12T05:43:16,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:16,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:16,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:16,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:43:16,730 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-12T05:43:16,730 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ffbfd3107920,39985,1733982196449 2024-12-12T05:43:16,736 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/hbase.id] with ID: 749380f4-a42d-4464-afa2-04ba503bd57c 2024-12-12T05:43:16,736 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/.tmp/hbase.id 2024-12-12T05:43:16,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741826_1002 (size=42) 2024-12-12T05:43:16,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741826_1002 (size=42) 2024-12-12T05:43:16,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741826_1002 (size=42) 2024-12-12T05:43:16,746 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/.tmp/hbase.id]:[hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/hbase.id] 2024-12-12T05:43:16,764 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:43:16,764 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-12T05:43:16,765 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-12T05:43:16,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741827_1003 (size=196) 2024-12-12T05:43:16,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741827_1003 (size=196) 2024-12-12T05:43:16,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741827_1003 (size=196) 2024-12-12T05:43:16,783 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:43:16,784 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-12T05:43:16,784 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-12T05:43:16,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is 
added to blk_1073741828_1004 (size=1189) 2024-12-12T05:43:16,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741828_1004 (size=1189) 2024-12-12T05:43:16,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741828_1004 (size=1189) 2024-12-12T05:43:16,800 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store 2024-12-12T05:43:16,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741829_1005 (size=34) 2024-12-12T05:43:16,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741829_1005 (size=34) 2024-12-12T05:43:16,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741829_1005 (size=34) 2024-12-12T05:43:16,811 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:16,811 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T05:43:16,811 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:16,811 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-12T05:43:16,811 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T05:43:16,811 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:16,811 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:16,812 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733982196811Disabling compacts and flushes for region at 1733982196811Disabling writes for close at 1733982196811Writing region close event to WAL at 1733982196811Closed at 1733982196811 2024-12-12T05:43:16,813 WARN [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/.initializing 2024-12-12T05:43:16,813 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/WALs/ffbfd3107920,39985,1733982196449 2024-12-12T05:43:16,817 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C39985%2C1733982196449, suffix=, logDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/WALs/ffbfd3107920,39985,1733982196449, archiveDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/oldWALs, maxLogs=10 2024-12-12T05:43:16,817 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ffbfd3107920%2C39985%2C1733982196449.1733982196817 2024-12-12T05:43:16,829 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/WALs/ffbfd3107920,39985,1733982196449/ffbfd3107920%2C39985%2C1733982196449.1733982196817 2024-12-12T05:43:16,831 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45337:45337),(127.0.0.1/127.0.0.1:34039:34039),(127.0.0.1/127.0.0.1:40015:40015)] 2024-12-12T05:43:16,831 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:43:16,832 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:16,832 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,832 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,834 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,836 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-12T05:43:16,836 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:16,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:16,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-12T05:43:16,839 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:16,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:43:16,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,842 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-12T05:43:16,842 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:16,843 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:43:16,843 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,846 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-12T05:43:16,846 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:16,846 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:43:16,847 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,847 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,848 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,849 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,850 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,850 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-12T05:43:16,852 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:43:16,855 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:43:16,855 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62348902, jitterRate=-0.07092896103858948}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T05:43:16,856 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733982196832Initializing all the Stores at 1733982196833 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982196833Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982196834 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982196834Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982196834Cleaning up temporary data from old regions at 1733982196850 (+16 ms)Region opened successfully at 1733982196856 (+6 ms) 2024-12-12T05:43:16,859 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-12T05:43:16,864 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e755a71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ffbfd3107920/172.17.0.2:0 2024-12-12T05:43:16,865 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-12T05:43:16,865 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-12T05:43:16,865 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-12T05:43:16,865 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-12T05:43:16,866 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-12T05:43:16,867 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-12T05:43:16,867 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-12T05:43:16,869 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-12T05:43:16,870 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-12T05:43:16,896 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-12T05:43:16,896 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-12T05:43:16,897 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-12T05:43:16,905 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-12T05:43:16,905 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-12T05:43:16,907 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-12T05:43:16,913 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-12T05:43:16,914 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-12T05:43:16,922 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-12T05:43:16,924 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-12T05:43:16,930 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-12T05:43:16,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:16,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:16,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:16,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-12T05:43:16,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:16,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,939 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ffbfd3107920,39985,1733982196449, sessionid=0x100189a51650000, setting cluster-up flag (Was=false) 2024-12-12T05:43:16,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,980 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-12T05:43:16,982 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ffbfd3107920,39985,1733982196449 2024-12-12T05:43:16,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:16,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:17,022 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-12T05:43:17,023 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ffbfd3107920,39985,1733982196449 2024-12-12T05:43:17,025 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-12T05:43:17,027 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-12T05:43:17,028 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-12T05:43:17,028 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-12T05:43:17,028 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ffbfd3107920,39985,1733982196449 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-12T05:43:17,030 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ffbfd3107920:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:43:17,030 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ffbfd3107920:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:43:17,030 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ffbfd3107920:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:43:17,030 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ffbfd3107920:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:43:17,030 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ffbfd3107920:0, corePoolSize=10, maxPoolSize=10 2024-12-12T05:43:17,030 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,030 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ffbfd3107920:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:43:17,030 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,032 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733982227032 2024-12-12T05:43:17,032 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-12T05:43:17,032 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-12T05:43:17,032 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-12T05:43:17,032 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-12T05:43:17,032 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-12T05:43:17,032 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-12T05:43:17,033 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-12T05:43:17,033 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-12T05:43:17,033 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,033 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-12T05:43:17,033 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-12T05:43:17,033 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-12T05:43:17,034 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-12T05:43:17,034 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-12T05:43:17,034 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.large.0-1733982197034,5,FailOnTimeoutGroup] 2024-12-12T05:43:17,034 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.small.0-1733982197034,5,FailOnTimeoutGroup] 2024-12-12T05:43:17,034 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,034 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,034 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-12T05:43:17,034 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,034 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:17,034 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T05:43:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741831_1007 (size=1321) 2024-12-12T05:43:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741831_1007 (size=1321) 2024-12-12T05:43:17,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741831_1007 (size=1321) 2024-12-12T05:43:17,045 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-12T05:43:17,045 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3 2024-12-12T05:43:17,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741832_1008 (size=32) 2024-12-12T05:43:17,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741832_1008 (size=32) 2024-12-12T05:43:17,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741832_1008 (size=32) 2024-12-12T05:43:17,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:17,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T05:43:17,061 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T05:43:17,061 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,062 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:17,062 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-12T05:43:17,063 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; 
throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-12T05:43:17,063 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:17,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T05:43:17,066 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T05:43:17,066 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:17,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T05:43:17,067 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, 
region 1588230740 columnFamilyName table 2024-12-12T05:43:17,068 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:17,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-12T05:43:17,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740 2024-12-12T05:43:17,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740 2024-12-12T05:43:17,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-12T05:43:17,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-12T05:43:17,071 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-12T05:43:17,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-12T05:43:17,075 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:43:17,075 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62222569, jitterRate=-0.07281146943569183}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T05:43:17,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733982197058Initializing all the Stores at 1733982197059 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982197059Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982197059Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536 B (64KB)'} at 1733982197059Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982197060 (+1 ms)Cleaning up temporary data from old regions at 1733982197071 (+11 ms)Region opened successfully at 1733982197076 (+5 ms) 2024-12-12T05:43:17,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-12T05:43:17,076 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-12T05:43:17,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-12T05:43:17,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T05:43:17,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T05:43:17,077 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-12T05:43:17,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733982197076Disabling compacts and flushes for region at 1733982197076Disabling writes for close at 1733982197076Writing region close event to WAL at 1733982197077 (+1 ms)Closed at 1733982197077 2024-12-12T05:43:17,079 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-12T05:43:17,079 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-12T05:43:17,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-12T05:43:17,080 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T05:43:17,082 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-12T05:43:17,089 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-12T05:43:17,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-12T05:43:17,096 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(746): ClusterId : 749380f4-a42d-4464-afa2-04ba503bd57c 2024-12-12T05:43:17,096 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(746): ClusterId : 749380f4-a42d-4464-afa2-04ba503bd57c 2024-12-12T05:43:17,096 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(746): ClusterId : 749380f4-a42d-4464-afa2-04ba503bd57c 2024-12-12T05:43:17,096 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T05:43:17,096 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T05:43:17,096 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T05:43:17,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-12T05:43:17,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-12T05:43:17,130 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T05:43:17,130 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T05:43:17,130 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T05:43:17,131 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T05:43:17,131 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T05:43:17,131 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T05:43:17,147 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T05:43:17,147 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T05:43:17,147 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T05:43:17,148 DEBUG [RS:0;ffbfd3107920:45695 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@616e1995, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ffbfd3107920/172.17.0.2:0 2024-12-12T05:43:17,148 DEBUG [RS:1;ffbfd3107920:39703 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c204206, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ffbfd3107920/172.17.0.2:0 2024-12-12T05:43:17,148 DEBUG [RS:2;ffbfd3107920:41391 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d67840c, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ffbfd3107920/172.17.0.2:0 2024-12-12T05:43:17,157 DEBUG [RS:1;ffbfd3107920:39703 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ffbfd3107920:39703 2024-12-12T05:43:17,157 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-12T05:43:17,157 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-12T05:43:17,157 DEBUG [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-12T05:43:17,158 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(2659): reportForDuty to master=ffbfd3107920,39985,1733982196449 with port=39703, startcode=1733982196629 2024-12-12T05:43:17,159 DEBUG [RS:1;ffbfd3107920:39703 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T05:43:17,161 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50541, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T05:43:17,161 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39985 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ffbfd3107920,39703,1733982196629 2024-12-12T05:43:17,161 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39985 {}] master.ServerManager(517): Registering regionserver=ffbfd3107920,39703,1733982196629 2024-12-12T05:43:17,162 DEBUG [RS:2;ffbfd3107920:41391 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;ffbfd3107920:41391 2024-12-12T05:43:17,162 DEBUG [RS:0;ffbfd3107920:45695 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ffbfd3107920:45695 2024-12-12T05:43:17,163 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-12T05:43:17,163 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-12T05:43:17,163 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-12T05:43:17,163 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-12T05:43:17,163 DEBUG [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-12T05:43:17,163 DEBUG [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-12T05:43:17,163 DEBUG [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3 2024-12-12T05:43:17,163 DEBUG [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36245 2024-12-12T05:43:17,163 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(2659): reportForDuty to master=ffbfd3107920,39985,1733982196449 with port=41391, startcode=1733982196671 2024-12-12T05:43:17,163 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(2659): reportForDuty to master=ffbfd3107920,39985,1733982196449 with port=45695, startcode=1733982196592 2024-12-12T05:43:17,163 DEBUG [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-12T05:43:17,164 DEBUG [RS:0;ffbfd3107920:45695 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T05:43:17,164 DEBUG [RS:2;ffbfd3107920:41391 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T05:43:17,165 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47617, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T05:43:17,165 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54923, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T05:43:17,165 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39985 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ffbfd3107920,41391,1733982196671 2024-12-12T05:43:17,166 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39985 {}] master.ServerManager(517): Registering regionserver=ffbfd3107920,41391,1733982196671 2024-12-12T05:43:17,167 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39985 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ffbfd3107920,45695,1733982196592 2024-12-12T05:43:17,167 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39985 {}] master.ServerManager(517): Registering regionserver=ffbfd3107920,45695,1733982196592 2024-12-12T05:43:17,167 DEBUG [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3 2024-12-12T05:43:17,168 DEBUG [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36245 2024-12-12T05:43:17,168 DEBUG [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-12T05:43:17,169 DEBUG [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3 2024-12-12T05:43:17,169 DEBUG [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36245 2024-12-12T05:43:17,169 DEBUG [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-12T05:43:17,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T05:43:17,200 DEBUG [RS:1;ffbfd3107920:39703 {}] zookeeper.ZKUtil(111): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ffbfd3107920,39703,1733982196629 2024-12-12T05:43:17,200 WARN [RS:1;ffbfd3107920:39703 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T05:43:17,200 INFO [RS:1;ffbfd3107920:39703 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-12T05:43:17,200 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ffbfd3107920,41391,1733982196671] 2024-12-12T05:43:17,200 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ffbfd3107920,45695,1733982196592] 2024-12-12T05:43:17,200 DEBUG [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,39703,1733982196629 2024-12-12T05:43:17,200 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ffbfd3107920,39703,1733982196629] 2024-12-12T05:43:17,200 DEBUG [RS:2;ffbfd3107920:41391 {}] zookeeper.ZKUtil(111): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ffbfd3107920,41391,1733982196671 2024-12-12T05:43:17,200 DEBUG [RS:0;ffbfd3107920:45695 {}] zookeeper.ZKUtil(111): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ffbfd3107920,45695,1733982196592 2024-12-12T05:43:17,200 WARN [RS:2;ffbfd3107920:41391 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T05:43:17,200 WARN [RS:0;ffbfd3107920:45695 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-12T05:43:17,200 INFO [RS:0;ffbfd3107920:45695 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-12T05:43:17,200 INFO [RS:2;ffbfd3107920:41391 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-12T05:43:17,200 DEBUG [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,45695,1733982196592 2024-12-12T05:43:17,201 DEBUG [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,41391,1733982196671 2024-12-12T05:43:17,206 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T05:43:17,206 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T05:43:17,206 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T05:43:17,209 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T05:43:17,209 INFO [RS:1;ffbfd3107920:39703 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T05:43:17,209 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,209 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-12T05:43:17,210 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T05:43:17,211 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-12T05:43:17,211 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:17,211 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,211 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,211 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,211 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,211 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,211 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ffbfd3107920:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:43:17,212 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,212 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,212 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,212 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,212 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,212 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,212 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:17,212 DEBUG [RS:1;ffbfd3107920:39703 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:17,214 INFO [RS:0;ffbfd3107920:45695 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T05:43:17,214 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:17,214 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T05:43:17,215 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,215 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,215 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,215 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,215 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,215 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39703,1733982196629-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T05:43:17,218 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-12T05:43:17,218 INFO [RS:2;ffbfd3107920:41391 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T05:43:17,218 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,218 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-12T05:43:17,219 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-12T05:43:17,219 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-12T05:43:17,219 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,219 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:17,219 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,219 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ffbfd3107920:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ffbfd3107920:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ffbfd3107920:0, 
corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,220 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ffbfd3107920:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:43:17,221 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:17,221 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:17,221 DEBUG [RS:0;ffbfd3107920:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:17,221 DEBUG [RS:2;ffbfd3107920:41391 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:43:17,222 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:17,223 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,41391,1733982196671-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,223 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,45695,1733982196592-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T05:43:17,231 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-12T05:43:17,231 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39703,1733982196629-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,231 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,231 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.Replication(171): ffbfd3107920,39703,1733982196629 started 2024-12-12T05:43:17,232 WARN [ffbfd3107920:39985 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-12T05:43:17,238 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-12T05:43:17,238 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-12T05:43:17,238 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,45695,1733982196592-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,238 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,41391,1733982196671-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,238 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,238 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,238 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.Replication(171): ffbfd3107920,45695,1733982196592 started 2024-12-12T05:43:17,238 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.Replication(171): ffbfd3107920,41391,1733982196671 started 2024-12-12T05:43:17,244 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:17,244 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(1482): Serving as ffbfd3107920,39703,1733982196629, RpcServer on ffbfd3107920/172.17.0.2:39703, sessionid=0x100189a51650002 2024-12-12T05:43:17,244 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T05:43:17,244 DEBUG [RS:1;ffbfd3107920:39703 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ffbfd3107920,39703,1733982196629 2024-12-12T05:43:17,244 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,39703,1733982196629' 2024-12-12T05:43:17,244 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T05:43:17,245 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T05:43:17,245 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T05:43:17,245 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T05:43:17,245 DEBUG [RS:1;ffbfd3107920:39703 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ffbfd3107920,39703,1733982196629 2024-12-12T05:43:17,245 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,39703,1733982196629' 2024-12-12T05:43:17,246 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T05:43:17,246 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T05:43:17,246 DEBUG [RS:1;ffbfd3107920:39703 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T05:43:17,247 INFO [RS:1;ffbfd3107920:39703 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T05:43:17,247 INFO [RS:1;ffbfd3107920:39703 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T05:43:17,251 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,251 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-12T05:43:17,251 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(1482): Serving as ffbfd3107920,41391,1733982196671, RpcServer on ffbfd3107920/172.17.0.2:41391, sessionid=0x100189a51650003 2024-12-12T05:43:17,251 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(1482): Serving as ffbfd3107920,45695,1733982196592, RpcServer on ffbfd3107920/172.17.0.2:45695, sessionid=0x100189a51650001 2024-12-12T05:43:17,251 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T05:43:17,251 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T05:43:17,251 DEBUG [RS:0;ffbfd3107920:45695 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ffbfd3107920,45695,1733982196592 2024-12-12T05:43:17,251 DEBUG [RS:2;ffbfd3107920:41391 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ffbfd3107920,41391,1733982196671 2024-12-12T05:43:17,251 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,45695,1733982196592' 2024-12-12T05:43:17,251 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,41391,1733982196671' 2024-12-12T05:43:17,251 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T05:43:17,251 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T05:43:17,252 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T05:43:17,252 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T05:43:17,252 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T05:43:17,252 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T05:43:17,252 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T05:43:17,252 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T05:43:17,252 DEBUG [RS:2;ffbfd3107920:41391 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ffbfd3107920,41391,1733982196671 2024-12-12T05:43:17,252 DEBUG [RS:0;ffbfd3107920:45695 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ffbfd3107920,45695,1733982196592 2024-12-12T05:43:17,252 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,41391,1733982196671' 2024-12-12T05:43:17,252 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ffbfd3107920,45695,1733982196592' 2024-12-12T05:43:17,252 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T05:43:17,253 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T05:43:17,253 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T05:43:17,253 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T05:43:17,253 DEBUG [RS:2;ffbfd3107920:41391 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T05:43:17,253 DEBUG [RS:0;ffbfd3107920:45695 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T05:43:17,253 INFO [RS:2;ffbfd3107920:41391 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T05:43:17,253 INFO [RS:0;ffbfd3107920:45695 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T05:43:17,253 INFO [RS:2;ffbfd3107920:41391 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T05:43:17,253 INFO [RS:0;ffbfd3107920:45695 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T05:43:17,352 INFO [RS:1;ffbfd3107920:39703 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C39703%2C1733982196629, suffix=, logDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,39703,1733982196629, archiveDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/oldWALs, maxLogs=32 2024-12-12T05:43:17,356 INFO [RS:1;ffbfd3107920:39703 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ffbfd3107920%2C39703%2C1733982196629.1733982197355 2024-12-12T05:43:17,356 INFO [RS:0;ffbfd3107920:45695 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C45695%2C1733982196592, suffix=, logDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,45695,1733982196592, archiveDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/oldWALs, maxLogs=32 2024-12-12T05:43:17,356 INFO [RS:2;ffbfd3107920:41391 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C41391%2C1733982196671, suffix=, logDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,41391,1733982196671, archiveDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/oldWALs, maxLogs=32 2024-12-12T05:43:17,361 INFO [RS:0;ffbfd3107920:45695 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ffbfd3107920%2C45695%2C1733982196592.1733982197361 2024-12-12T05:43:17,361 INFO [RS:2;ffbfd3107920:41391 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ffbfd3107920%2C41391%2C1733982196671.1733982197361 2024-12-12T05:43:17,368 INFO [RS:1;ffbfd3107920:39703 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,39703,1733982196629/ffbfd3107920%2C39703%2C1733982196629.1733982197355 2024-12-12T05:43:17,372 DEBUG [RS:1;ffbfd3107920:39703 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45337:45337),(127.0.0.1/127.0.0.1:34039:34039),(127.0.0.1/127.0.0.1:40015:40015)] 2024-12-12T05:43:17,374 INFO [RS:2;ffbfd3107920:41391 
{}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,41391,1733982196671/ffbfd3107920%2C41391%2C1733982196671.1733982197361 2024-12-12T05:43:17,374 INFO [RS:0;ffbfd3107920:45695 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,45695,1733982196592/ffbfd3107920%2C45695%2C1733982196592.1733982197361 2024-12-12T05:43:17,375 DEBUG [RS:2;ffbfd3107920:41391 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34039:34039),(127.0.0.1/127.0.0.1:45337:45337),(127.0.0.1/127.0.0.1:40015:40015)] 2024-12-12T05:43:17,375 DEBUG [RS:0;ffbfd3107920:45695 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34039:34039),(127.0.0.1/127.0.0.1:40015:40015),(127.0.0.1/127.0.0.1:45337:45337)] 2024-12-12T05:43:17,482 DEBUG [ffbfd3107920:39985 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-12T05:43:17,483 DEBUG [ffbfd3107920:39985 {}] balancer.BalancerClusterState(204): Hosts are {ffbfd3107920=0} racks are {/default-rack=0} 2024-12-12T05:43:17,486 DEBUG [ffbfd3107920:39985 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T05:43:17,486 DEBUG [ffbfd3107920:39985 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T05:43:17,486 DEBUG [ffbfd3107920:39985 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T05:43:17,486 DEBUG [ffbfd3107920:39985 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T05:43:17,486 DEBUG [ffbfd3107920:39985 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T05:43:17,486 DEBUG [ffbfd3107920:39985 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T05:43:17,486 INFO [ffbfd3107920:39985 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T05:43:17,486 INFO [ffbfd3107920:39985 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T05:43:17,486 INFO [ffbfd3107920:39985 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T05:43:17,486 DEBUG [ffbfd3107920:39985 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T05:43:17,487 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ffbfd3107920,45695,1733982196592 2024-12-12T05:43:17,489 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ffbfd3107920,45695,1733982196592, state=OPENING 2024-12-12T05:43:17,505 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-12T05:43:17,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:17,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:17,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:17,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:17,557 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T05:43:17,557 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:17,558 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:17,558 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ffbfd3107920,45695,1733982196592}] 2024-12-12T05:43:17,558 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:17,558 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:17,716 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T05:43:17,718 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42011, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T05:43:17,725 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-12T05:43:17,726 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-12T05:43:17,728 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ffbfd3107920%2C45695%2C1733982196592.meta, suffix=.meta, logDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,45695,1733982196592, archiveDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/oldWALs, maxLogs=32 2024-12-12T05:43:17,729 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ffbfd3107920%2C45695%2C1733982196592.meta.1733982197729.meta 2024-12-12T05:43:17,736 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/WALs/ffbfd3107920,45695,1733982196592/ffbfd3107920%2C45695%2C1733982196592.meta.1733982197729.meta 2024-12-12T05:43:17,740 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:34039:34039),(127.0.0.1/127.0.0.1:45337:45337),(127.0.0.1/127.0.0.1:40015:40015)] 2024-12-12T05:43:17,742 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:43:17,742 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-12T05:43:17,742 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-12T05:43:17,742 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-12T05:43:17,742 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-12T05:43:17,742 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:17,742 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-12T05:43:17,743 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-12T05:43:17,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T05:43:17,746 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T05:43:17,746 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:17,746 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-12T05:43:17,747 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-12T05:43:17,747 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:17,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T05:43:17,749 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T05:43:17,749 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:17,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T05:43:17,751 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T05:43:17,751 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:43:17,752 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-12T05:43:17,753 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740 2024-12-12T05:43:17,754 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740 2024-12-12T05:43:17,756 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-12T05:43:17,756 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-12T05:43:17,756 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-12T05:43:17,758 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-12T05:43:17,759 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72426541, jitterRate=0.07923956215381622}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T05:43:17,759 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-12T05:43:17,760 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733982197743Writing region info on filesystem at 1733982197743Initializing all the Stores at 1733982197744 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982197744Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982197744Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982197744Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733982197744Cleaning up temporary data from old regions at 1733982197756 (+12 ms)Running coprocessor post-open hooks at 1733982197759 (+3 ms)Region opened successfully at 1733982197760 (+1 ms) 2024-12-12T05:43:17,761 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733982197715 2024-12-12T05:43:17,764 DEBUG [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-12T05:43:17,765 INFO [RS_OPEN_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-12T05:43:17,765 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=ffbfd3107920,45695,1733982196592 2024-12-12T05:43:17,767 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ffbfd3107920,45695,1733982196592, state=OPEN 2024-12-12T05:43:17,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:43:17,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:43:17,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:43:17,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:43:17,788 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ffbfd3107920,45695,1733982196592 2024-12-12T05:43:17,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:17,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:17,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:17,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:43:17,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-12T05:43:17,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ffbfd3107920,45695,1733982196592 in 231 msec 2024-12-12T05:43:17,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-12T05:43:17,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 713 msec 2024-12-12T05:43:17,798 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-12T05:43:17,798 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-12T05:43:17,800 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T05:43:17,800 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=ffbfd3107920,45695,1733982196592, seqNum=-1] 2024-12-12T05:43:17,800 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:43:17,801 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47611, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:43:17,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 781 msec 2024-12-12T05:43:17,809 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733982197809, completionTime=-1 2024-12-12T05:43:17,809 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-12T05:43:17,809 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-12T05:43:17,811 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-12T05:43:17,812 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733982257811 2024-12-12T05:43:17,812 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733982317812 2024-12-12T05:43:17,812 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-12T05:43:17,812 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-12T05:43:17,812 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39985,1733982196449-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,812 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39985,1733982196449-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,812 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39985,1733982196449-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,813 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ffbfd3107920:39985, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,813 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-12T05:43:17,813 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
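The InitMetaProcedure step above ("Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces") creates those namespaces inside the master itself. For reference, a hedged sketch of the roughly equivalent client-side Admin call for a user-defined namespace (the namespace name here is illustrative, not taken from the log, and this is not the procedure's internal code path):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Client-side analogue of namespace creation; 'example_ns' is a placeholder.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
    }
  }
}
```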
2024-12-12T05:43:17,815 DEBUG [master/ffbfd3107920:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-12T05:43:17,817 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.087sec 2024-12-12T05:43:17,818 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-12T05:43:17,818 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-12T05:43:17,818 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-12T05:43:17,818 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-12T05:43:17,818 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-12T05:43:17,818 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39985,1733982196449-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T05:43:17,818 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39985,1733982196449-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-12T05:43:17,820 DEBUG [master/ffbfd3107920:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-12T05:43:17,820 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-12T05:43:17,821 INFO [master/ffbfd3107920:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ffbfd3107920,39985,1733982196449-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
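The chore registrations above (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, RollingUpgradeChore, and so on) all go through the same ChoreService/ScheduledChore mechanism. A hedged sketch of defining and scheduling a chore with that internal API, assuming the ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore signatures; the names and period are illustrative, not from the log:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Minimal Stoppable so the chore has something to consult between runs.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Runs every 60,000 ms, mirroring the period= values logged above.
    ScheduledChore chore = new ScheduledChore("example-chore", stopper, 60000) {
      @Override protected void chore() {
        System.out.println("periodic work goes here");
      }
    };
    ChoreService service = new ChoreService("example-prefix");
    service.scheduleChore(chore);
    // ... later, during shutdown: service.shutdown();
  }
}
```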
2024-12-12T05:43:17,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c2dd873, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:43:17,897 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ffbfd3107920,39985,-1 for getting cluster id 2024-12-12T05:43:17,898 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T05:43:17,900 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '749380f4-a42d-4464-afa2-04ba503bd57c' 2024-12-12T05:43:17,900 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T05:43:17,901 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "749380f4-a42d-4464-afa2-04ba503bd57c" 2024-12-12T05:43:17,901 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f8960d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:43:17,901 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ffbfd3107920,39985,-1] 2024-12-12T05:43:17,902 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T05:43:17,903 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:17,905 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48246, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T05:43:17,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dffb11d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:43:17,907 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T05:43:17,908 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ffbfd3107920,45695,1733982196592, seqNum=-1] 2024-12-12T05:43:17,909 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:43:17,910 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57348, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:43:17,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ffbfd3107920,39985,1733982196449 2024-12-12T05:43:17,913 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-12T05:43:17,914 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is ffbfd3107920,39985,1733982196449 2024-12-12T05:43:17,914 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6fd94868 2024-12-12T05:43:17,915 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T05:43:17,916 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48252, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T05:43:17,917 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:43:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-12T05:43:17,920 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T05:43:17,920 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:17,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-12T05:43:17,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T05:43:17,922 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T05:43:17,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741837_1013 (size=392) 2024-12-12T05:43:17,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741837_1013 (size=392) 2024-12-12T05:43:17,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741837_1013 (size=392) 2024-12-12T05:43:17,932 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 69408fe60df8abf686f7cc63607b2215, NAME => 'TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3 2024-12-12T05:43:17,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741838_1014 (size=51) 2024-12-12T05:43:17,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741838_1014 (size=51) 2024-12-12T05:43:17,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741838_1014 (size=51) 2024-12-12T05:43:17,940 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:17,940 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 69408fe60df8abf686f7cc63607b2215, disabling compactions & flushes 2024-12-12T05:43:17,940 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:17,940 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:17,940 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. after waiting 0 ms 2024-12-12T05:43:17,940 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:17,940 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:17,940 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 69408fe60df8abf686f7cc63607b2215: Waiting for close lock at 1733982197940Disabling compacts and flushes for region at 1733982197940Disabling writes for close at 1733982197940Writing region close event to WAL at 1733982197940Closed at 1733982197940 2024-12-12T05:43:17,942 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T05:43:17,942 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733982197942"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733982197942"}]},"ts":"1733982197942"} 2024-12-12T05:43:17,945 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
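The create request logged above ('TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family, stored as pid=4 CreateTableProcedure) corresponds to a plain Admin.createTable call on the client side. A minimal sketch using the standard TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API; connection setup is elided and the 'admin' handle is assumed to be already open:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

class CreateTableSketch {
  // Sketch of the client call behind the CreateTableProcedure above.
  static void createTestTable(Admin admin) throws java.io.IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
        .setRegionReplication(1)                                // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // defaults match the logged 'cf' family
        .build();
    admin.createTable(desc);
  }
}
```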
2024-12-12T05:43:17,946 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T05:43:17,947 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982197946"}]},"ts":"1733982197946"} 2024-12-12T05:43:17,949 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-12T05:43:17,949 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ffbfd3107920=0} racks are {/default-rack=0} 2024-12-12T05:43:17,950 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T05:43:17,950 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T05:43:17,950 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T05:43:17,950 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T05:43:17,950 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T05:43:17,950 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T05:43:17,950 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T05:43:17,951 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T05:43:17,951 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T05:43:17,951 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T05:43:17,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=69408fe60df8abf686f7cc63607b2215, ASSIGN}] 2024-12-12T05:43:17,953 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=69408fe60df8abf686f7cc63607b2215, ASSIGN 2024-12-12T05:43:17,954 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=69408fe60df8abf686f7cc63607b2215, ASSIGN; state=OFFLINE, location=ffbfd3107920,45695,1733982196592; forceNewPlan=false, retain=false 2024-12-12T05:43:18,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T05:43:18,105 INFO [ffbfd3107920:39985 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-12T05:43:18,105 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=69408fe60df8abf686f7cc63607b2215, regionState=OPENING, regionLocation=ffbfd3107920,45695,1733982196592 2024-12-12T05:43:18,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=69408fe60df8abf686f7cc63607b2215, ASSIGN because future has completed 2024-12-12T05:43:18,110 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69408fe60df8abf686f7cc63607b2215, server=ffbfd3107920,45695,1733982196592}] 2024-12-12T05:43:18,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T05:43:18,270 INFO [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:18,270 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 69408fe60df8abf686f7cc63607b2215, NAME => 'TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:43:18,271 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,271 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:43:18,271 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,271 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,274 INFO [StoreOpener-69408fe60df8abf686f7cc63607b2215-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,275 INFO [StoreOpener-69408fe60df8abf686f7cc63607b2215-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69408fe60df8abf686f7cc63607b2215 columnFamilyName cf 2024-12-12T05:43:18,275 DEBUG [StoreOpener-69408fe60df8abf686f7cc63607b2215-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:43:18,276 INFO [StoreOpener-69408fe60df8abf686f7cc63607b2215-1 {}] regionserver.HStore(327): Store=69408fe60df8abf686f7cc63607b2215/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:43:18,276 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,277 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/default/TestHBaseWalOnEC/69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,277 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/default/TestHBaseWalOnEC/69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,278 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,278 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,280 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,282 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/default/TestHBaseWalOnEC/69408fe60df8abf686f7cc63607b2215/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:43:18,283 INFO [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 69408fe60df8abf686f7cc63607b2215; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70401308, jitterRate=0.04906123876571655}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T05:43:18,283 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,283 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 69408fe60df8abf686f7cc63607b2215: Running coprocessor pre-open hook at 1733982198271Writing region info on filesystem at 1733982198271Initializing all the Stores at 1733982198273 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733982198273Cleaning up temporary data from old regions at 1733982198278 (+5 ms)Running coprocessor post-open hooks at 1733982198283 (+5 ms)Region opened successfully at 1733982198283 2024-12-12T05:43:18,285 INFO [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215., pid=6, masterSystemTime=1733982198263 2024-12-12T05:43:18,287 DEBUG [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:18,287 INFO [RS_OPEN_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:18,288 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=69408fe60df8abf686f7cc63607b2215, regionState=OPEN, openSeqNum=2, regionLocation=ffbfd3107920,45695,1733982196592 2024-12-12T05:43:18,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69408fe60df8abf686f7cc63607b2215, server=ffbfd3107920,45695,1733982196592 because future has completed 2024-12-12T05:43:18,298 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-12T05:43:18,299 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 69408fe60df8abf686f7cc63607b2215, server=ffbfd3107920,45695,1733982196592 in 184 msec 2024-12-12T05:43:18,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-12T05:43:18,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=69408fe60df8abf686f7cc63607b2215, ASSIGN in 347 msec 2024-12-12T05:43:18,304 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T05:43:18,304 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982198304"}]},"ts":"1733982198304"} 2024-12-12T05:43:18,307 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-12T05:43:18,309 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T05:43:18,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 392 msec 2024-12-12T05:43:18,553 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T05:43:18,554 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-12T05:43:18,554 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-12T05:43:18,554 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T05:43:18,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-12T05:43:18,559 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T05:43:18,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-12T05:43:18,563 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215., hostname=ffbfd3107920,45695,1733982196592, seqNum=2] 2024-12-12T05:43:18,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-12T05:43:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-12T05:43:18,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T05:43:18,570 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:43:18,572 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:43:18,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:43:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T05:43:18,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45695 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-12T05:43:18,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 
2024-12-12T05:43:18,727 INFO [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 69408fe60df8abf686f7cc63607b2215 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-12T05:43:18,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/default/TestHBaseWalOnEC/69408fe60df8abf686f7cc63607b2215/.tmp/cf/aa8108ccc24f4fcf87ab08650f2f6e6a is 36, key is row/cf:cq/1733982198565/Put/seqid=0 2024-12-12T05:43:18,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741839_1015 (size=4787) 2024-12-12T05:43:18,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741839_1015 (size=4787) 2024-12-12T05:43:18,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741839_1015 (size=4787) 2024-12-12T05:43:18,755 INFO [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/default/TestHBaseWalOnEC/69408fe60df8abf686f7cc63607b2215/.tmp/cf/aa8108ccc24f4fcf87ab08650f2f6e6a 2024-12-12T05:43:18,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/default/TestHBaseWalOnEC/69408fe60df8abf686f7cc63607b2215/.tmp/cf/aa8108ccc24f4fcf87ab08650f2f6e6a as hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/default/TestHBaseWalOnEC/69408fe60df8abf686f7cc63607b2215/cf/aa8108ccc24f4fcf87ab08650f2f6e6a 2024-12-12T05:43:18,771 INFO [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/default/TestHBaseWalOnEC/69408fe60df8abf686f7cc63607b2215/cf/aa8108ccc24f4fcf87ab08650f2f6e6a, entries=1, sequenceid=5, filesize=4.7 K 2024-12-12T05:43:18,773 INFO [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 69408fe60df8abf686f7cc63607b2215 in 46ms, sequenceid=5, compaction requested=false 2024-12-12T05:43:18,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 69408fe60df8abf686f7cc63607b2215: 2024-12-12T05:43:18,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 
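The flush above covers a single 32-byte cell keyed row/cf:cq (see the HFileWriterImpl line). A hedged sketch of the client-side write-then-flush that typically drives this path, using the standard Table and Admin APIs; the table and admin handles are assumed to be already open, and the cell value is a placeholder:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

class WriteAndFlushSketch {
  // Write the row/cf:cq cell seen in the flush above, then ask the master to
  // flush the table, which fans out to a FlushRegionProcedure per region.
  static void writeAndFlush(Table table, Admin admin) throws java.io.IOException {
    Put put = new Put(Bytes.toBytes("row"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"));
    table.put(put);
    admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
  }
}
```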
2024-12-12T05:43:18,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ffbfd3107920:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-12T05:43:18,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-12T05:43:18,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-12T05:43:18,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 203 msec 2024-12-12T05:43:18,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 212 msec 2024-12-12T05:43:18,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T05:43:18,892 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-12T05:43:18,896 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-12T05:43:18,896 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-12T05:43:18,896 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:18,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:18,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:18,897 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T05:43:18,897 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-12T05:43:18,897 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=794070436, stopped=false 2024-12-12T05:43:18,897 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ffbfd3107920,39985,1733982196449 2024-12-12T05:43:18,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:18,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:18,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:18,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:43:18,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:18,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, 
quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:18,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:18,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:18,956 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-12T05:43:18,957 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-12T05:43:18,958 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:18,958 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:18,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:18,959 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:18,959 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:18,959 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:43:18,959 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ffbfd3107920,45695,1733982196592' ***** 2024-12-12T05:43:18,959 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-12T05:43:18,959 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ffbfd3107920,39703,1733982196629' ***** 2024-12-12T05:43:18,960 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-12T05:43:18,960 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ffbfd3107920,41391,1733982196671' ***** 2024-12-12T05:43:18,960 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-12T05:43:18,960 INFO [RS:0;ffbfd3107920:45695 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T05:43:18,960 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-12T05:43:18,961 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-12T05:43:18,960 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-12T05:43:18,961 INFO [RS:0;ffbfd3107920:45695 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
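The call stacks above enter the shutdown through TestHBaseWalOnEC.tearDown -> HBaseTestingUtil.shutdownMiniCluster, which then stops the master and the three region servers seen below. A minimal sketch of that teardown hook, assuming a static HBaseTestingUtil field named UTIL (the field name and use of @AfterClass are assumptions for illustration, not taken from the log):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class MiniClusterTeardownSketch {
  // Assumed to have been created and started with startMiniCluster() in setup.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDown() throws Exception {
    // Stops the HBase minicluster (master + regionservers) and its backing services,
    // producing a "Shutting down minicluster" / STOPPING sequence like the one logged here.
    UTIL.shutdownMiniCluster();
  }
}
```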
2024-12-12T05:43:18,961 INFO [RS:1;ffbfd3107920:39703 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T05:43:18,961 INFO [RS:1;ffbfd3107920:39703 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T05:43:18,961 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(3091): Received CLOSE for 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,961 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-12T05:43:18,961 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(959): stopping server ffbfd3107920,39703,1733982196629 2024-12-12T05:43:18,961 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-12T05:43:18,962 INFO [RS:1;ffbfd3107920:39703 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T05:43:18,962 INFO [RS:2;ffbfd3107920:41391 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T05:43:18,962 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-12T05:43:18,962 INFO [RS:2;ffbfd3107920:41391 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T05:43:18,962 INFO [RS:1;ffbfd3107920:39703 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ffbfd3107920:39703. 2024-12-12T05:43:18,962 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(959): stopping server ffbfd3107920,41391,1733982196671 2024-12-12T05:43:18,962 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(959): stopping server ffbfd3107920,45695,1733982196592 2024-12-12T05:43:18,962 INFO [RS:2;ffbfd3107920:41391 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T05:43:18,962 INFO [RS:0;ffbfd3107920:45695 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T05:43:18,962 DEBUG [RS:1;ffbfd3107920:39703 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:18,962 INFO [RS:2;ffbfd3107920:41391 {}] 
client.AsyncConnectionImpl(233): Connection has been closed by RS:2;ffbfd3107920:41391. 2024-12-12T05:43:18,962 DEBUG [RS:1;ffbfd3107920:39703 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:18,962 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 69408fe60df8abf686f7cc63607b2215, disabling compactions & flushes 2024-12-12T05:43:18,962 INFO [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:18,962 INFO [RS:0;ffbfd3107920:45695 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ffbfd3107920:45695. 2024-12-12T05:43:18,962 DEBUG [RS:2;ffbfd3107920:41391 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:18,962 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(976): stopping server ffbfd3107920,39703,1733982196629; all regions closed. 2024-12-12T05:43:18,962 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 
2024-12-12T05:43:18,962 DEBUG [RS:2;ffbfd3107920:41391 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:18,962 DEBUG [RS:0;ffbfd3107920:45695 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:43:18,962 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. after waiting 0 ms 2024-12-12T05:43:18,962 DEBUG [RS:0;ffbfd3107920:45695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:18,963 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:18,963 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(976): stopping server ffbfd3107920,41391,1733982196671; all regions closed. 2024-12-12T05:43:18,963 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T05:43:18,963 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T05:43:18,963 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-12T05:43:18,963 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-12T05:43:18,963 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,963 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-12T05:43:18,963 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,963 DEBUG [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(1325): Online Regions={69408fe60df8abf686f7cc63607b2215=TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215., 1588230740=hbase:meta,,1.1588230740} 2024-12-12T05:43:18,963 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,963 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,963 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-12T05:43:18,963 DEBUG [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 69408fe60df8abf686f7cc63607b2215 2024-12-12T05:43:18,963 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,963 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,963 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-12T05:43:18,964 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,964 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-12T05:43:18,964 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,964 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T05:43:18,964 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,964 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:18,964 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T05:43:18,964 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-12T05:43:18,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741833_1009 (size=93) 2024-12-12T05:43:18,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741833_1009 (size=93) 2024-12-12T05:43:18,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741835_1011 (size=93) 2024-12-12T05:43:18,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741835_1011 (size=93) 2024-12-12T05:43:18,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741833_1009 (size=93) 2024-12-12T05:43:18,972 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741835_1011 (size=93) 2024-12-12T05:43:18,974 DEBUG [RS:1;ffbfd3107920:39703 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/oldWALs 2024-12-12T05:43:18,974 INFO [RS:1;ffbfd3107920:39703 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ffbfd3107920%2C39703%2C1733982196629:(num 1733982197355) 2024-12-12T05:43:18,974 DEBUG [RS:1;ffbfd3107920:39703 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:18,974 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:18,974 INFO [RS:1;ffbfd3107920:39703 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T05:43:18,974 INFO [RS:1;ffbfd3107920:39703 {}] hbase.ChoreService(370): Chore service for: regionserver/ffbfd3107920:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-12T05:43:18,974 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T05:43:18,974 INFO [regionserver/ffbfd3107920:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-12T05:43:18,974 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T05:43:18,974 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T05:43:18,974 INFO [RS:1;ffbfd3107920:39703 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T05:43:18,974 INFO [RS:1;ffbfd3107920:39703 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39703 2024-12-12T05:43:18,976 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/default/TestHBaseWalOnEC/69408fe60df8abf686f7cc63607b2215/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T05:43:18,977 INFO [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 2024-12-12T05:43:18,977 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 69408fe60df8abf686f7cc63607b2215: Waiting for close lock at 1733982198962Running coprocessor pre-close hooks at 1733982198962Disabling compacts and flushes for region at 1733982198962Disabling writes for close at 1733982198962Writing region close event to WAL at 1733982198971 (+9 ms)Running coprocessor post-close hooks at 1733982198977 (+6 ms)Closed at 1733982198977 2024-12-12T05:43:18,977 DEBUG [RS_CLOSE_REGION-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215. 
2024-12-12T05:43:18,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T05:43:18,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ffbfd3107920,39703,1733982196629 2024-12-12T05:43:18,980 INFO [RS:1;ffbfd3107920:39703 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T05:43:18,984 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/.tmp/info/6d69441254974d7aa38cb0a1c0dc44fc is 153, key is TestHBaseWalOnEC,,1733982197917.69408fe60df8abf686f7cc63607b2215./info:regioninfo/1733982198288/Put/seqid=0 2024-12-12T05:43:18,985 WARN [IPC Server handler 0 on default port 36245 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T05:43:18,985 WARN [IPC Server handler 0 on default port 36245 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T05:43:18,985 WARN [IPC Server handler 0 on default port 36245 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-12T05:43:18,988 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ffbfd3107920,39703,1733982196629] 2024-12-12T05:43:18,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741840_1016 (size=6637) 2024-12-12T05:43:18,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741840_1016 (size=6637) 2024-12-12T05:43:18,990 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/.tmp/info/6d69441254974d7aa38cb0a1c0dc44fc 2024-12-12T05:43:18,996 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ffbfd3107920,39703,1733982196629 
already deleted, retry=false 2024-12-12T05:43:18,996 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ffbfd3107920,39703,1733982196629 expired; onlineServers=2 2024-12-12T05:43:19,011 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/.tmp/ns/32db3845488847389920d1db5a4bf95d is 43, key is default/ns:d/1733982197802/Put/seqid=0 2024-12-12T05:43:19,012 WARN [IPC Server handler 4 on default port 36245 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T05:43:19,012 WARN [IPC Server handler 4 on default port 36245 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T05:43:19,013 WARN [IPC Server handler 4 on default port 36245 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-12T05:43:19,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741841_1017 (size=5153) 2024-12-12T05:43:19,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741841_1017 (size=5153) 2024-12-12T05:43:19,018 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/.tmp/ns/32db3845488847389920d1db5a4bf95d 2024-12-12T05:43:19,020 INFO [regionserver/ffbfd3107920:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:19,027 INFO [regionserver/ffbfd3107920:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:19,027 INFO [regionserver/ffbfd3107920:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:19,039 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/.tmp/table/b017b4c5d3d34848a53da6cdd69de065 is 52, key is TestHBaseWalOnEC/table:state/1733982198304/Put/seqid=0 2024-12-12T05:43:19,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44435 is added to blk_1073741842_1018 (size=5249) 2024-12-12T05:43:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741842_1018 (size=5249) 2024-12-12T05:43:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741842_1018 (size=5249) 2024-12-12T05:43:19,046 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/.tmp/table/b017b4c5d3d34848a53da6cdd69de065 2024-12-12T05:43:19,055 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/.tmp/info/6d69441254974d7aa38cb0a1c0dc44fc as hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/info/6d69441254974d7aa38cb0a1c0dc44fc 2024-12-12T05:43:19,063 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/info/6d69441254974d7aa38cb0a1c0dc44fc, entries=10, sequenceid=11, filesize=6.5 K 2024-12-12T05:43:19,064 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/.tmp/ns/32db3845488847389920d1db5a4bf95d as hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/ns/32db3845488847389920d1db5a4bf95d 2024-12-12T05:43:19,072 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/ns/32db3845488847389920d1db5a4bf95d, entries=2, sequenceid=11, filesize=5.0 K 2024-12-12T05:43:19,073 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/.tmp/table/b017b4c5d3d34848a53da6cdd69de065 as hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/table/b017b4c5d3d34848a53da6cdd69de065 2024-12-12T05:43:19,081 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/table/b017b4c5d3d34848a53da6cdd69de065, entries=2, sequenceid=11, filesize=5.1 K 2024-12-12T05:43:19,082 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false 2024-12-12T05:43:19,088 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 
{event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-12T05:43:19,088 INFO [RS:1;ffbfd3107920:39703 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T05:43:19,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:19,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39703-0x100189a51650002, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:19,088 INFO [RS:1;ffbfd3107920:39703 {}] regionserver.HRegionServer(1031): Exiting; stopping=ffbfd3107920,39703,1733982196629; zookeeper connection closed. 2024-12-12T05:43:19,088 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T05:43:19,088 INFO [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-12T05:43:19,089 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@14d376d4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@14d376d4 2024-12-12T05:43:19,089 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733982198963Running coprocessor pre-close hooks at 1733982198963Disabling compacts and flushes for region at 1733982198963Disabling writes for close at 1733982198964 (+1 ms)Obtaining lock to block concurrent updates at 1733982198964Preparing flush snapshotting stores in 1588230740 at 1733982198964Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733982198964Flushing stores of hbase:meta,,1.1588230740 at 1733982198965 (+1 ms)Flushing 1588230740/info: creating writer at 1733982198965Flushing 1588230740/info: appending metadata at 1733982198983 (+18 ms)Flushing 1588230740/info: closing flushed file at 1733982198984 (+1 ms)Flushing 1588230740/ns: creating writer at 1733982198997 (+13 ms)Flushing 1588230740/ns: appending metadata at 1733982199011 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733982199011Flushing 1588230740/table: creating writer at 1733982199025 (+14 ms)Flushing 1588230740/table: appending metadata at 1733982199038 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733982199038Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1107daf3: reopening flushed file at 1733982199054 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@86b1c49: reopening flushed file at 1733982199063 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50bcdba8: reopening flushed file at 1733982199072 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false at 1733982199082 (+10 ms)Writing region close event to WAL at 1733982199083 (+1 ms)Running 
coprocessor post-close hooks at 1733982199088 (+5 ms)Closed at 1733982199088 2024-12-12T05:43:19,089 DEBUG [RS_CLOSE_META-regionserver/ffbfd3107920:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-12T05:43:19,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-12T05:43:19,107 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-12T05:43:19,164 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(976): stopping server ffbfd3107920,45695,1733982196592; all regions closed. 2024-12-12T05:43:19,164 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,165 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,165 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,165 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,165 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741836_1012 (size=2751) 2024-12-12T05:43:19,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741836_1012 (size=2751) 2024-12-12T05:43:19,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741836_1012 (size=2751) 2024-12-12T05:43:19,172 DEBUG [RS:0;ffbfd3107920:45695 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/oldWALs 2024-12-12T05:43:19,172 INFO [RS:0;ffbfd3107920:45695 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ffbfd3107920%2C45695%2C1733982196592.meta:.meta(num 1733982197729) 2024-12-12T05:43:19,172 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,172 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,172 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,173 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,173 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741834_1010 (size=1298) 2024-12-12T05:43:19,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741834_1010 (size=1298) 2024-12-12T05:43:19,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741834_1010 (size=1298) 2024-12-12T05:43:19,178 DEBUG [RS:0;ffbfd3107920:45695 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/oldWALs 2024-12-12T05:43:19,179 INFO [RS:0;ffbfd3107920:45695 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ffbfd3107920%2C45695%2C1733982196592:(num 1733982197361) 2024-12-12T05:43:19,179 DEBUG [RS:0;ffbfd3107920:45695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:19,179 INFO [RS:0;ffbfd3107920:45695 {}] 
regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:19,179 INFO [RS:0;ffbfd3107920:45695 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T05:43:19,179 INFO [RS:0;ffbfd3107920:45695 {}] hbase.ChoreService(370): Chore service for: regionserver/ffbfd3107920:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-12T05:43:19,179 INFO [RS:0;ffbfd3107920:45695 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T05:43:19,179 INFO [regionserver/ffbfd3107920:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-12T05:43:19,179 INFO [RS:0;ffbfd3107920:45695 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45695 2024-12-12T05:43:19,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T05:43:19,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ffbfd3107920,45695,1733982196592 2024-12-12T05:43:19,205 INFO [RS:0;ffbfd3107920:45695 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T05:43:19,205 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$371/0x00007fd97c8f5858@3b8073d0 rejected from java.util.concurrent.ThreadPoolExecutor@3fbfe078[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-12T05:43:19,213 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ffbfd3107920,45695,1733982196592] 2024-12-12T05:43:19,221 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ffbfd3107920,45695,1733982196592 already deleted, retry=false 2024-12-12T05:43:19,221 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ffbfd3107920,45695,1733982196592 expired; onlineServers=1 2024-12-12T05:43:19,223 INFO [regionserver/ffbfd3107920:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-12T05:43:19,223 INFO [regionserver/ffbfd3107920:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-12T05:43:19,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:19,313 INFO [RS:0;ffbfd3107920:45695 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T05:43:19,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x100189a51650001, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:19,314 INFO [RS:0;ffbfd3107920:45695 {}] regionserver.HRegionServer(1031): Exiting; stopping=ffbfd3107920,45695,1733982196592; zookeeper connection closed. 2024-12-12T05:43:19,314 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@41a40c7c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@41a40c7c 2024-12-12T05:43:19,379 DEBUG [RS:2;ffbfd3107920:41391 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/oldWALs 2024-12-12T05:43:19,380 INFO [RS:2;ffbfd3107920:41391 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ffbfd3107920%2C41391%2C1733982196671:(num 1733982197361) 2024-12-12T05:43:19,380 DEBUG [RS:2;ffbfd3107920:41391 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:43:19,380 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:43:19,380 INFO [RS:2;ffbfd3107920:41391 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T05:43:19,380 INFO [RS:2;ffbfd3107920:41391 {}] hbase.ChoreService(370): Chore service for: regionserver/ffbfd3107920:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-12T05:43:19,380 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T05:43:19,380 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
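The ERROR from zookeeper.ClientCnxn$EventThread above is a benign teardown race rather than a test failure: the region server has already shut down the single-thread executor that ZKWatcher uses to dispatch events (the Executors$DelegatedExecutorService in the trace), so a late ZooKeeper notification is rejected by the pool's default AbortPolicy. A minimal, self-contained Java sketch of that JDK behaviour (illustrative only, not HBase code):

// Illustrative only: the generic JDK behaviour behind the
// RejectedExecutionException logged above. Once an executor has been
// shut down, further execute() calls are rejected by the default AbortPolicy.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class RejectAfterShutdownSketch {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    pool.shutdown();              // same state as the watcher's event executor during close
    try {
      pool.execute(() -> {});     // a late task, e.g. a ZooKeeper event delivered mid-teardown
    } catch (RejectedExecutionException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}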
2024-12-12T05:43:19,380 INFO [regionserver/ffbfd3107920:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-12T05:43:19,380 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T05:43:19,381 INFO [RS:2;ffbfd3107920:41391 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T05:43:19,381 INFO [RS:2;ffbfd3107920:41391 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41391 2024-12-12T05:43:19,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ffbfd3107920,41391,1733982196671 2024-12-12T05:43:19,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T05:43:19,404 INFO [RS:2;ffbfd3107920:41391 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T05:43:19,404 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ffbfd3107920,41391,1733982196671] 2024-12-12T05:43:19,421 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ffbfd3107920,41391,1733982196671 already deleted, retry=false 2024-12-12T05:43:19,421 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ffbfd3107920,41391,1733982196671 expired; onlineServers=0 2024-12-12T05:43:19,421 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ffbfd3107920,39985,1733982196449' ***** 2024-12-12T05:43:19,421 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-12T05:43:19,422 INFO [M:0;ffbfd3107920:39985 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T05:43:19,422 INFO [M:0;ffbfd3107920:39985 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T05:43:19,422 DEBUG [M:0;ffbfd3107920:39985 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-12T05:43:19,422 DEBUG [M:0;ffbfd3107920:39985 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-12T05:43:19,422 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-12T05:43:19,422 DEBUG [master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.small.0-1733982197034 {}] cleaner.HFileCleaner(306): Exit Thread[master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.small.0-1733982197034,5,FailOnTimeoutGroup] 2024-12-12T05:43:19,422 DEBUG [master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.large.0-1733982197034 {}] cleaner.HFileCleaner(306): Exit Thread[master/ffbfd3107920:0:becomeActiveMaster-HFileCleaner.large.0-1733982197034,5,FailOnTimeoutGroup] 2024-12-12T05:43:19,422 INFO [M:0;ffbfd3107920:39985 {}] hbase.ChoreService(370): Chore service for: master/ffbfd3107920:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-12T05:43:19,422 INFO [M:0;ffbfd3107920:39985 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T05:43:19,423 DEBUG [M:0;ffbfd3107920:39985 {}] master.HMaster(1795): Stopping service threads 2024-12-12T05:43:19,423 INFO [M:0;ffbfd3107920:39985 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-12T05:43:19,423 INFO [M:0;ffbfd3107920:39985 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-12T05:43:19,423 INFO [M:0;ffbfd3107920:39985 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-12T05:43:19,423 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-12T05:43:19,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-12T05:43:19,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:43:19,430 DEBUG [M:0;ffbfd3107920:39985 {}] zookeeper.ZKUtil(347): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-12T05:43:19,430 WARN [M:0;ffbfd3107920:39985 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-12T05:43:19,431 INFO [M:0;ffbfd3107920:39985 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/.lastflushedseqids 2024-12-12T05:43:19,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741843_1019 (size=127) 2024-12-12T05:43:19,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741843_1019 (size=127) 2024-12-12T05:43:19,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741843_1019 (size=127) 2024-12-12T05:43:19,443 INFO [M:0;ffbfd3107920:39985 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-12T05:43:19,443 INFO [M:0;ffbfd3107920:39985 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-12T05:43:19,444 DEBUG 
[M:0;ffbfd3107920:39985 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T05:43:19,444 INFO [M:0;ffbfd3107920:39985 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:19,444 DEBUG [M:0;ffbfd3107920:39985 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:19,444 DEBUG [M:0;ffbfd3107920:39985 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T05:43:19,444 DEBUG [M:0;ffbfd3107920:39985 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:19,444 INFO [M:0;ffbfd3107920:39985 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-12T05:43:19,461 DEBUG [M:0;ffbfd3107920:39985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d633ee8d9fb140e89fe54a8efca5da1d is 82, key is hbase:meta,,1/info:regioninfo/1733982197765/Put/seqid=0 2024-12-12T05:43:19,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741844_1020 (size=5672) 2024-12-12T05:43:19,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741844_1020 (size=5672) 2024-12-12T05:43:19,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741844_1020 (size=5672) 2024-12-12T05:43:19,468 INFO [M:0;ffbfd3107920:39985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d633ee8d9fb140e89fe54a8efca5da1d 2024-12-12T05:43:19,488 DEBUG [M:0;ffbfd3107920:39985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d9e972a4b0c1472eaeed1c76556e2830 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733982198311/Put/seqid=0 2024-12-12T05:43:19,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741845_1021 (size=6439) 2024-12-12T05:43:19,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741845_1021 (size=6439) 2024-12-12T05:43:19,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741845_1021 (size=6439) 2024-12-12T05:43:19,496 INFO [M:0;ffbfd3107920:39985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d9e972a4b0c1472eaeed1c76556e2830 2024-12-12T05:43:19,513 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:19,513 INFO [RS:2;ffbfd3107920:41391 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T05:43:19,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41391-0x100189a51650003, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T05:43:19,513 INFO [RS:2;ffbfd3107920:41391 {}] regionserver.HRegionServer(1031): Exiting; stopping=ffbfd3107920,41391,1733982196671; zookeeper connection closed. 2024-12-12T05:43:19,513 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@168314a3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@168314a3 2024-12-12T05:43:19,514 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-12T05:43:19,516 DEBUG [M:0;ffbfd3107920:39985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5d08fa2402c24519bbe9966cc3158949 is 69, key is ffbfd3107920,39703,1733982196629/rs:state/1733982197161/Put/seqid=0 2024-12-12T05:43:19,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741846_1022 (size=5294) 2024-12-12T05:43:19,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741846_1022 (size=5294) 2024-12-12T05:43:19,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741846_1022 (size=5294) 2024-12-12T05:43:19,523 INFO [M:0;ffbfd3107920:39985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5d08fa2402c24519bbe9966cc3158949 2024-12-12T05:43:19,530 DEBUG [M:0;ffbfd3107920:39985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d633ee8d9fb140e89fe54a8efca5da1d as hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d633ee8d9fb140e89fe54a8efca5da1d 2024-12-12T05:43:19,536 INFO [M:0;ffbfd3107920:39985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d633ee8d9fb140e89fe54a8efca5da1d, entries=8, sequenceid=72, filesize=5.5 K 2024-12-12T05:43:19,538 DEBUG [M:0;ffbfd3107920:39985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d9e972a4b0c1472eaeed1c76556e2830 as 
hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d9e972a4b0c1472eaeed1c76556e2830 2024-12-12T05:43:19,543 INFO [M:0;ffbfd3107920:39985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d9e972a4b0c1472eaeed1c76556e2830, entries=8, sequenceid=72, filesize=6.3 K 2024-12-12T05:43:19,544 DEBUG [M:0;ffbfd3107920:39985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5d08fa2402c24519bbe9966cc3158949 as hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5d08fa2402c24519bbe9966cc3158949 2024-12-12T05:43:19,550 INFO [M:0;ffbfd3107920:39985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49a54ec4-04ff-2025-efdd-4f66c6080af3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5d08fa2402c24519bbe9966cc3158949, entries=3, sequenceid=72, filesize=5.2 K 2024-12-12T05:43:19,552 INFO [M:0;ffbfd3107920:39985 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 107ms, sequenceid=72, compaction requested=false 2024-12-12T05:43:19,553 INFO [M:0;ffbfd3107920:39985 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:43:19,553 DEBUG [M:0;ffbfd3107920:39985 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733982199444Disabling compacts and flushes for region at 1733982199444Disabling writes for close at 1733982199444Obtaining lock to block concurrent updates at 1733982199444Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733982199444Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733982199444Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733982199445 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733982199445Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733982199461 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733982199461Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733982199474 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733982199488 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733982199488Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733982199502 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733982199515 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733982199515Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a7ec26d: reopening flushed file at 1733982199529 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d2bb547: reopening flushed file at 1733982199537 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e7710f7: reopening flushed file at 1733982199543 (+6 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 107ms, sequenceid=72, compaction requested=false at 1733982199552 (+9 ms)Writing region close event to WAL at 1733982199553 (+1 ms)Closed at 1733982199553 2024-12-12T05:43:19,553 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,553 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,553 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,554 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,554 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-12T05:43:19,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741830_1006 (size=32674) 2024-12-12T05:43:19,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36661 is added to blk_1073741830_1006 (size=32674) 2024-12-12T05:43:19,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44435 is added to blk_1073741830_1006 (size=32674) 2024-12-12T05:43:19,557 INFO [M:0;ffbfd3107920:39985 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-12T05:43:19,557 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-12T05:43:19,557 INFO [M:0;ffbfd3107920:39985 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39985
2024-12-12T05:43:19,558 INFO [M:0;ffbfd3107920:39985 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-12T05:43:19,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T05:43:19,672 INFO [M:0;ffbfd3107920:39985 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-12T05:43:19,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39985-0x100189a51650000, quorum=127.0.0.1:59158, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T05:43:19,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22ace0e0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T05:43:19,679 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4246e5dd{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T05:43:19,679 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T05:43:19,680 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@769ec274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T05:43:19,680 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ad282c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.log.dir/,STOPPED}
2024-12-12T05:43:19,682 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-12T05:43:19,682 WARN [BP-1612086000-172.17.0.2-1733982194562 heartbeating to localhost/127.0.0.1:36245 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-12T05:43:19,682 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-12T05:43:19,682 WARN [BP-1612086000-172.17.0.2-1733982194562 heartbeating to localhost/127.0.0.1:36245 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1612086000-172.17.0.2-1733982194562 (Datanode Uuid 5f66a038-0882-4551-bf2c-a045b4ecd1bc) service to localhost/127.0.0.1:36245
2024-12-12T05:43:19,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data5/current/BP-1612086000-172.17.0.2-1733982194562 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T05:43:19,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data6/current/BP-1612086000-172.17.0.2-1733982194562 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T05:43:19,683 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-12T05:43:19,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e4b56fd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T05:43:19,685 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29550204{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T05:43:19,685 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T05:43:19,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bedcc8f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T05:43:19,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7579e296{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.log.dir/,STOPPED}
2024-12-12T05:43:19,687 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-12T05:43:19,687 WARN [BP-1612086000-172.17.0.2-1733982194562 heartbeating to localhost/127.0.0.1:36245 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-12T05:43:19,687 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-12T05:43:19,687 WARN [BP-1612086000-172.17.0.2-1733982194562 heartbeating to localhost/127.0.0.1:36245 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1612086000-172.17.0.2-1733982194562 (Datanode Uuid 579349ef-5cd0-4bc2-85ee-334752a310c3) service to localhost/127.0.0.1:36245
2024-12-12T05:43:19,687 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data3/current/BP-1612086000-172.17.0.2-1733982194562 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T05:43:19,687 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data4/current/BP-1612086000-172.17.0.2-1733982194562 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T05:43:19,688 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-12T05:43:19,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@90c4d2d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T05:43:19,690 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71b2c5b9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T05:43:19,690 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T05:43:19,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d78823c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T05:43:19,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e4b8c76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.log.dir/,STOPPED}
2024-12-12T05:43:19,691 WARN [BP-1612086000-172.17.0.2-1733982194562 heartbeating to localhost/127.0.0.1:36245 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-12T05:43:19,691 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-12T05:43:19,691 WARN [BP-1612086000-172.17.0.2-1733982194562 heartbeating to localhost/127.0.0.1:36245 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1612086000-172.17.0.2-1733982194562 (Datanode Uuid 31287333-2d80-4a57-a80b-c4cfba24c1b0) service to localhost/127.0.0.1:36245
2024-12-12T05:43:19,691 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-12T05:43:19,692 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data1/current/BP-1612086000-172.17.0.2-1733982194562 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T05:43:19,692 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/cluster_0ad39a1c-f5f9-fb27-f400-d2c6aaf292d0/data/data2/current/BP-1612086000-172.17.0.2-1733982194562 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T05:43:19,692 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-12T05:43:19,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ffee01{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-12T05:43:19,697 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66a76d08{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T05:43:19,698 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T05:43:19,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7427c398{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T05:43:19,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bc7756d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/497967e3-51c3-4554-eda4-de74555bee5e/hadoop.log.dir/,STOPPED}
2024-12-12T05:43:19,705 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-12T05:43:19,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-12T05:43:19,737 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=147 (was 87) - Thread LEAK? -, OpenFileDescriptor=518 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=290 (was 298), ProcessCount=11 (was 11), AvailableMemoryMB=9382 (was 9523)