2024-11-09 11:51:52,540 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 11:51:52,554 main DEBUG Took 0.011776 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-09 11:51:52,555 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-09 11:51:52,555 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-09 11:51:52,556 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-09 11:51:52,557 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,567 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-09 11:51:52,588 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,590 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,591 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,592 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,593 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,593 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,594 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,595 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,596 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,596 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,597 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,598 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,599 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,599 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-09 11:51:52,600 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,600 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,601 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,601 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,602 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,602 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,603 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,604 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,604 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,605 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 11:51:52,605 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,606 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-09 11:51:52,608 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 11:51:52,610 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-09 11:51:52,612 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-09 11:51:52,613 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-09 11:51:52,615 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-09 11:51:52,616 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-09 11:51:52,627 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-09 11:51:52,631 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-09 11:51:52,633 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-09 11:51:52,634 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-09 11:51:52,635 main DEBUG createAppenders(={Console}) 2024-11-09 11:51:52,636 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-09 11:51:52,636 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 11:51:52,636 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-09 11:51:52,637 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-09 11:51:52,637 main DEBUG OutputStream closed 2024-11-09 11:51:52,638 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-09 11:51:52,638 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-09 11:51:52,638 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-09 11:51:52,733 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-09 11:51:52,736 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-09 11:51:52,737 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-09 11:51:52,739 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-09 11:51:52,740 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-09 11:51:52,741 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-09 11:51:52,741 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-09 11:51:52,742 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-09 11:51:52,742 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-09 11:51:52,743 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-09 11:51:52,743 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-09 11:51:52,744 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-09 11:51:52,744 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-09 11:51:52,745 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-09 11:51:52,745 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-09 11:51:52,746 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-09 11:51:52,746 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-09 11:51:52,747 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-09 11:51:52,750 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-09 11:51:52,751 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-09 11:51:52,751 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-09 11:51:52,752 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-09T11:51:52,773 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-09 11:51:52,777 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-09 11:51:52,778 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-09T11:51:53,075 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e 2024-11-09T11:51:53,100 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145, deleteOnExit=true 2024-11-09T11:51:53,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/test.cache.data in system properties and HBase conf 2024-11-09T11:51:53,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T11:51:53,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir in system properties and HBase conf 2024-11-09T11:51:53,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T11:51:53,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T11:51:53,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T11:51:53,210 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-09T11:51:53,322 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T11:51:53,327 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T11:51:53,328 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T11:51:53,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T11:51:53,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T11:51:53,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T11:51:53,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T11:51:53,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T11:51:53,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T11:51:53,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T11:51:53,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/nfs.dump.dir in system properties and HBase conf 2024-11-09T11:51:53,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/java.io.tmpdir in system properties and HBase conf 2024-11-09T11:51:53,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T11:51:53,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T11:51:53,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T11:51:54,462 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-09T11:51:54,533 INFO [Time-limited test {}] log.Log(170): Logging initialized @2713ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-09T11:51:54,604 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:51:54,667 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T11:51:54,686 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T11:51:54,687 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T11:51:54,688 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T11:51:54,699 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:51:54,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir/,AVAILABLE} 2024-11-09T11:51:54,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T11:51:54,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58dbf239{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/java.io.tmpdir/jetty-localhost-39791-hadoop-hdfs-3_4_1-tests_jar-_-any-2238639580402860430/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T11:51:54,877 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:39791} 2024-11-09T11:51:54,878 INFO [Time-limited test {}] server.Server(415): Started @3058ms 2024-11-09T11:51:55,431 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:51:55,438 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T11:51:55,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T11:51:55,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T11:51:55,440 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T11:51:55,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir/,AVAILABLE} 2024-11-09T11:51:55,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T11:51:55,542 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65462677{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/java.io.tmpdir/jetty-localhost-42451-hadoop-hdfs-3_4_1-tests_jar-_-any-3527891106775059919/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:51:55,543 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:42451} 2024-11-09T11:51:55,543 INFO [Time-limited test {}] server.Server(415): Started @3723ms 2024-11-09T11:51:55,591 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T11:51:55,694 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:51:55,699 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T11:51:55,700 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T11:51:55,701 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T11:51:55,701 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T11:51:55,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir/,AVAILABLE} 2024-11-09T11:51:55,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T11:51:55,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@513cab2c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/java.io.tmpdir/jetty-localhost-41561-hadoop-hdfs-3_4_1-tests_jar-_-any-11206497558685421423/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:51:55,827 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:41561} 2024-11-09T11:51:55,827 INFO [Time-limited test {}] server.Server(415): Started @4007ms 2024-11-09T11:51:55,831 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T11:51:55,864 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:51:55,869 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T11:51:55,870 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T11:51:55,871 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T11:51:55,871 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T11:51:55,872 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir/,AVAILABLE} 2024-11-09T11:51:55,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T11:51:55,980 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@653e6301{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/java.io.tmpdir/jetty-localhost-46691-hadoop-hdfs-3_4_1-tests_jar-_-any-13255665219190224510/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:51:55,981 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@404caff2{HTTP/1.1, (http/1.1)}{localhost:46691} 2024-11-09T11:51:55,981 INFO [Time-limited test {}] server.Server(415): Started @4161ms 2024-11-09T11:51:55,983 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-09T11:51:57,640 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data3/current/BP-809292392-172.17.0.2-1731153113877/current, will proceed with Du for space computation calculation, 2024-11-09T11:51:57,640 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data1/current/BP-809292392-172.17.0.2-1731153113877/current, will proceed with Du for space computation calculation, 2024-11-09T11:51:57,640 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data2/current/BP-809292392-172.17.0.2-1731153113877/current, will proceed with Du for space computation calculation, 2024-11-09T11:51:57,640 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data4/current/BP-809292392-172.17.0.2-1731153113877/current, will proceed with Du for space computation calculation, 2024-11-09T11:51:57,656 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data5/current/BP-809292392-172.17.0.2-1731153113877/current, will proceed with Du for space computation calculation, 2024-11-09T11:51:57,656 WARN [Thread-133 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data6/current/BP-809292392-172.17.0.2-1731153113877/current, will proceed with Du for space computation calculation, 2024-11-09T11:51:57,674 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T11:51:57,674 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T11:51:57,680 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T11:51:57,725 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ce4be35833f9c2b with lease ID 0x5e2e3bdb1fae128c: Processing first storage report for DS-de141fe7-366e-4fb2-a377-2f80327ce555 from datanode DatanodeRegistration(127.0.0.1:35339, datanodeUuid=00628b43-1e30-4aab-9f99-dfef2f36b11f, infoPort=36515, infoSecurePort=0, ipcPort=44811, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877) 2024-11-09T11:51:57,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ce4be35833f9c2b with lease ID 0x5e2e3bdb1fae128c: from storage DS-de141fe7-366e-4fb2-a377-2f80327ce555 node DatanodeRegistration(127.0.0.1:35339, datanodeUuid=00628b43-1e30-4aab-9f99-dfef2f36b11f, infoPort=36515, infoSecurePort=0, ipcPort=44811, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T11:51:57,726 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c5f5cdcd745191d with lease ID 0x5e2e3bdb1fae128b: Processing first storage report for DS-2f607617-3931-4624-8370-72a7a69ee114 from datanode DatanodeRegistration(127.0.0.1:36503, datanodeUuid=c7428fe4-f620-4632-a5aa-936818a86cf3, infoPort=46339, infoSecurePort=0, ipcPort=45307, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877) 2024-11-09T11:51:57,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c5f5cdcd745191d with lease ID 0x5e2e3bdb1fae128b: from storage DS-2f607617-3931-4624-8370-72a7a69ee114 node DatanodeRegistration(127.0.0.1:36503, datanodeUuid=c7428fe4-f620-4632-a5aa-936818a86cf3, infoPort=46339, infoSecurePort=0, ipcPort=45307, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T11:51:57,727 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a40680269cc921e with lease ID 0x5e2e3bdb1fae128a: Processing first storage report for DS-84133049-5905-4d93-82cd-c73362bee7d6 from datanode DatanodeRegistration(127.0.0.1:41587, datanodeUuid=1247d701-817a-4bce-82ae-efb2adb0e6d6, infoPort=45733, infoSecurePort=0, ipcPort=40455, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877) 2024-11-09T11:51:57,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a40680269cc921e with lease ID 0x5e2e3bdb1fae128a: from storage DS-84133049-5905-4d93-82cd-c73362bee7d6 node DatanodeRegistration(127.0.0.1:41587, datanodeUuid=1247d701-817a-4bce-82ae-efb2adb0e6d6, infoPort=45733, infoSecurePort=0, ipcPort=40455, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T11:51:57,727 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ce4be35833f9c2b with lease ID 0x5e2e3bdb1fae128c: Processing first storage report for DS-8afff15b-a786-4165-9555-af7874ca18fc from datanode DatanodeRegistration(127.0.0.1:35339, datanodeUuid=00628b43-1e30-4aab-9f99-dfef2f36b11f, infoPort=36515, infoSecurePort=0, ipcPort=44811, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877) 2024-11-09T11:51:57,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x5ce4be35833f9c2b with lease ID 0x5e2e3bdb1fae128c: from storage DS-8afff15b-a786-4165-9555-af7874ca18fc node DatanodeRegistration(127.0.0.1:35339, datanodeUuid=00628b43-1e30-4aab-9f99-dfef2f36b11f, infoPort=36515, infoSecurePort=0, ipcPort=44811, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T11:51:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c5f5cdcd745191d with lease ID 0x5e2e3bdb1fae128b: Processing first storage report for DS-f9f8396f-f020-4e8c-a9e6-cee6c2b0f846 from datanode DatanodeRegistration(127.0.0.1:36503, datanodeUuid=c7428fe4-f620-4632-a5aa-936818a86cf3, infoPort=46339, infoSecurePort=0, ipcPort=45307, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877) 2024-11-09T11:51:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c5f5cdcd745191d with lease ID 0x5e2e3bdb1fae128b: from storage DS-f9f8396f-f020-4e8c-a9e6-cee6c2b0f846 node DatanodeRegistration(127.0.0.1:36503, datanodeUuid=c7428fe4-f620-4632-a5aa-936818a86cf3, infoPort=46339, infoSecurePort=0, ipcPort=45307, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T11:51:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a40680269cc921e with lease ID 0x5e2e3bdb1fae128a: Processing first storage report for DS-1e9bd599-0f69-4e62-abc1-4fb565b6ede5 from datanode DatanodeRegistration(127.0.0.1:41587, datanodeUuid=1247d701-817a-4bce-82ae-efb2adb0e6d6, infoPort=45733, infoSecurePort=0, ipcPort=40455, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877) 2024-11-09T11:51:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a40680269cc921e with lease ID 0x5e2e3bdb1fae128a: from storage DS-1e9bd599-0f69-4e62-abc1-4fb565b6ede5 node DatanodeRegistration(127.0.0.1:41587, datanodeUuid=1247d701-817a-4bce-82ae-efb2adb0e6d6, infoPort=45733, infoSecurePort=0, ipcPort=40455, storageInfo=lv=-57;cid=testClusterID;nsid=100718379;c=1731153113877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T11:51:57,801 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e 2024-11-09T11:51:57,866 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-11-09T11:51:57,926 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=161, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=84, ProcessCount=11, AvailableMemoryMB=6148 2024-11-09T11:51:57,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T11:51:57,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-09T11:51:57,999 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/zookeeper_0, clientPort=60726, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T11:51:58,008 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60726 2024-11-09T11:51:58,031 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:58,035 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:58,120 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:51:58,120 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:51:58,162 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:53794 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:36503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53794 dst: /127.0.0.1:36503 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:51:58,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-09T11:51:58,582 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:51:58,596 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934 with version=8 2024-11-09T11:51:58,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/hbase-staging 2024-11-09T11:51:58,689 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-09T11:51:58,922 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3264b4bbda9f:0 server-side Connection retries=45 2024-11-09T11:51:58,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:58,931 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:58,935 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T11:51:58,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:58,936 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T11:51:59,060 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T11:51:59,111 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-09T11:51:59,119 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-09T11:51:59,122 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T11:51:59,143 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 97255 (auto-detected) 2024-11-09T11:51:59,144 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-09T11:51:59,159 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43655 2024-11-09T11:51:59,178 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43655 connecting to ZooKeeper ensemble=127.0.0.1:60726 2024-11-09T11:51:59,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:436550x0, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T11:51:59,316 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43655-0x1011f703ec90000 connected 2024-11-09T11:51:59,406 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:59,410 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:59,420 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:51:59,424 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934, hbase.cluster.distributed=false 2024-11-09T11:51:59,444 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T11:51:59,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43655 2024-11-09T11:51:59,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43655 2024-11-09T11:51:59,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43655 2024-11-09T11:51:59,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43655 2024-11-09T11:51:59,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43655 2024-11-09T11:51:59,542 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3264b4bbda9f:0 server-side Connection retries=45 2024-11-09T11:51:59,543 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:59,543 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:59,544 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T11:51:59,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:59,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T11:51:59,546 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T11:51:59,549 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T11:51:59,550 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34739 2024-11-09T11:51:59,551 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34739 connecting to ZooKeeper ensemble=127.0.0.1:60726 2024-11-09T11:51:59,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:59,556 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:59,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347390x0, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T11:51:59,573 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:347390x0, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:51:59,574 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34739-0x1011f703ec90001 connected 2024-11-09T11:51:59,577 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T11:51:59,586 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T11:51:59,589 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T11:51:59,595 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T11:51:59,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34739 2024-11-09T11:51:59,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34739 
2024-11-09T11:51:59,598 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34739 2024-11-09T11:51:59,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34739 2024-11-09T11:51:59,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34739 2024-11-09T11:51:59,617 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3264b4bbda9f:0 server-side Connection retries=45 2024-11-09T11:51:59,617 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:59,618 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:59,618 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T11:51:59,618 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:59,619 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T11:51:59,619 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T11:51:59,619 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T11:51:59,620 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39789 2024-11-09T11:51:59,622 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39789 connecting to ZooKeeper ensemble=127.0.0.1:60726 2024-11-09T11:51:59,623 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:59,627 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:59,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397890x0, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T11:51:59,644 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:397890x0, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:51:59,644 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39789-0x1011f703ec90002 connected 2024-11-09T11:51:59,644 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 
2024-11-09T11:51:59,645 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T11:51:59,646 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T11:51:59,648 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T11:51:59,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39789 2024-11-09T11:51:59,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39789 2024-11-09T11:51:59,653 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39789 2024-11-09T11:51:59,653 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39789 2024-11-09T11:51:59,654 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39789 2024-11-09T11:51:59,670 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3264b4bbda9f:0 server-side Connection retries=45 2024-11-09T11:51:59,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:59,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:59,670 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T11:51:59,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:51:59,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T11:51:59,671 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T11:51:59,671 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T11:51:59,672 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45225 2024-11-09T11:51:59,673 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45225 connecting to ZooKeeper ensemble=127.0.0.1:60726 2024-11-09T11:51:59,675 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:59,677 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:59,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:452250x0, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T11:51:59,689 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452250x0, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:51:59,689 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45225-0x1011f703ec90003 connected 2024-11-09T11:51:59,690 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T11:51:59,692 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T11:51:59,693 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T11:51:59,695 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T11:51:59,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45225 2024-11-09T11:51:59,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45225 2024-11-09T11:51:59,701 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45225 2024-11-09T11:51:59,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45225 2024-11-09T11:51:59,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45225 2024-11-09T11:51:59,719 DEBUG [M:0;3264b4bbda9f:43655 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3264b4bbda9f:43655 2024-11-09T11:51:59,720 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3264b4bbda9f,43655,1731153118776 2024-11-09T11:51:59,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:51:59,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:51:59,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-11-09T11:51:59,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:51:59,740 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3264b4bbda9f,43655,1731153118776 2024-11-09T11:51:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T11:51:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T11:51:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:51:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:51:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:51:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T11:51:59,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:51:59,770 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T11:51:59,771 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3264b4bbda9f,43655,1731153118776 from backup master directory 2024-11-09T11:51:59,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:51:59,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3264b4bbda9f,43655,1731153118776 2024-11-09T11:51:59,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-09T11:51:59,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:51:59,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:51:59,784 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T11:51:59,784 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3264b4bbda9f,43655,1731153118776 2024-11-09T11:51:59,786 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-09T11:51:59,788 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-09T11:51:59,846 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/hbase.id] with ID: 8fdf4288-ebe5-4288-a8e5-08757ab9ee81 2024-11-09T11:51:59,846 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/.tmp/hbase.id 2024-11-09T11:51:59,853 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:51:59,853 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:51:59,857 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:54576 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:35339:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54576 dst: /127.0.0.1:35339 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:51:59,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-09T11:51:59,866 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:51:59,866 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/.tmp/hbase.id]:[hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/hbase.id] 2024-11-09T11:51:59,916 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:51:59,921 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T11:51:59,939 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-09T11:51:59,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:51:59,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:51:59,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:51:59,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:51:59,964 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:51:59,964 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:51:59,967 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:54180 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:41587:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54180 dst: /127.0.0.1:41587 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:51:59,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-09T11:51:59,973 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T11:51:59,987 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T11:51:59,989 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T11:51:59,994 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T11:52:00,021 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:00,021 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:00,026 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:54604 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:35339:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54604 dst: /127.0.0.1:35339 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:00,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-09T11:52:00,032 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:52:00,047 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store 2024-11-09T11:52:00,061 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:00,062 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:00,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:53820 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53820 dst: /127.0.0.1:36503 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:00,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-09T11:52:00,072 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:52:00,076 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-09T11:52:00,079 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:00,081 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T11:52:00,081 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:00,081 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:00,082 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-09T11:52:00,083 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:00,083 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:00,084 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731153120080Disabling compacts and flushes for region at 1731153120081 (+1 ms)Disabling writes for close at 1731153120083 (+2 ms)Writing region close event to WAL at 1731153120083Closed at 1731153120083 2024-11-09T11:52:00,086 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/.initializing 2024-11-09T11:52:00,086 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/WALs/3264b4bbda9f,43655,1731153118776 2024-11-09T11:52:00,094 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T11:52:00,108 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C43655%2C1731153118776, suffix=, logDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/WALs/3264b4bbda9f,43655,1731153118776, archiveDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/oldWALs, maxLogs=10 2024-11-09T11:52:00,136 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/WALs/3264b4bbda9f,43655,1731153118776/3264b4bbda9f%2C43655%2C1731153118776.1731153120112, exclude list is [], retry=0 2024-11-09T11:52:00,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:00,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35339,DS-de141fe7-366e-4fb2-a377-2f80327ce555,DISK] 2024-11-09T11:52:00,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41587,DS-84133049-5905-4d93-82cd-c73362bee7d6,DISK] 2024-11-09T11:52:00,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36503,DS-2f607617-3931-4624-8370-72a7a69ee114,DISK] 2024-11-09T11:52:00,159 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-09T11:52:00,199 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/WALs/3264b4bbda9f,43655,1731153118776/3264b4bbda9f%2C43655%2C1731153118776.1731153120112 2024-11-09T11:52:00,199 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45733:45733),(127.0.0.1/127.0.0.1:46339:46339),(127.0.0.1/127.0.0.1:36515:36515)] 2024-11-09T11:52:00,200 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T11:52:00,200 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:00,203 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,203 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,237 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T11:52:00,264 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:00,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:00,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,271 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T11:52:00,271 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:00,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T11:52:00,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T11:52:00,275 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:00,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T11:52:00,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T11:52:00,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:00,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T11:52:00,280 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,283 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,284 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,289 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,290 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,293 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T11:52:00,297 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:00,304 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T11:52:00,305 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59039669, jitterRate=-0.12024037539958954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T11:52:00,312 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731153120215Initializing all the Stores at 1731153120217 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153120217Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153120218 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153120218Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153120218Cleaning up temporary data from old regions at 1731153120290 (+72 ms)Region opened successfully at 1731153120312 (+22 ms) 2024-11-09T11:52:00,313 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T11:52:00,341 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d8f8f03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3264b4bbda9f/172.17.0.2:0 2024-11-09T11:52:00,367 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T11:52:00,377 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T11:52:00,377 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T11:52:00,379 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T11:52:00,380 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-09T11:52:00,385 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-09T11:52:00,385 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T11:52:00,408 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-09T11:52:00,415 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T11:52:00,464 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T11:52:00,466 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T11:52:00,468 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T11:52:00,478 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T11:52:00,479 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T11:52:00,483 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T11:52:00,495 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T11:52:00,497 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T11:52:00,509 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T11:52:00,531 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T11:52:00,541 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T11:52:00,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:00,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:00,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:00,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:00,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,554 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3264b4bbda9f,43655,1731153118776, sessionid=0x1011f703ec90000, setting cluster-up flag (Was=false) 2024-11-09T11:52:00,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-09T11:52:00,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,615 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T11:52:00,620 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3264b4bbda9f,43655,1731153118776 2024-11-09T11:52:00,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:00,678 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T11:52:00,681 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3264b4bbda9f,43655,1731153118776 2024-11-09T11:52:00,691 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T11:52:00,709 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(746): ClusterId : 8fdf4288-ebe5-4288-a8e5-08757ab9ee81 2024-11-09T11:52:00,710 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(746): ClusterId : 8fdf4288-ebe5-4288-a8e5-08757ab9ee81 2024-11-09T11:52:00,710 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(746): ClusterId : 8fdf4288-ebe5-4288-a8e5-08757ab9ee81 2024-11-09T11:52:00,712 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T11:52:00,712 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T11:52:00,712 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T11:52:00,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-09T11:52:00,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35339 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-09T11:52:00,739 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T11:52:00,739 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T11:52:00,739 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T11:52:00,739 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T11:52:00,739 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T11:52:00,739 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T11:52:00,763 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T11:52:00,763 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T11:52:00,763 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T11:52:00,763 DEBUG [RS:1;3264b4bbda9f:39789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@513ed9af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3264b4bbda9f/172.17.0.2:0 2024-11-09T11:52:00,763 DEBUG [RS:0;3264b4bbda9f:34739 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b0c38b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3264b4bbda9f/172.17.0.2:0 2024-11-09T11:52:00,763 DEBUG [RS:2;3264b4bbda9f:45225 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5509da1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3264b4bbda9f/172.17.0.2:0 2024-11-09T11:52:00,766 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T11:52:00,776 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T11:52:00,782 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
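Note on the ZKWatcher entries above: the master and the three region servers each receive NodeChildrenChanged events for /hbase as the procedure znodes under /hbase/flush-table-proc and /hbase/online-snapshot are cleared and recreated. A minimal sketch of how such child watches behave with the plain ZooKeeper client API follows; it is illustrative only and not the HBase ZKWatcher code, with the quorum address and base znode taken from the log above.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ChildWatchSketch {
    public static void main(String[] args) throws Exception {
        // Quorum as reported in the log: 127.0.0.1:60726.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60726", 30000, new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                // The log records exactly this kind of event: type + state + path.
                if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
                    System.out.println("Children changed under " + event.getPath());
                }
            }
        });
        // Registering a child watch; it fires once and must be re-set afterwards,
        // which is why the log shows repeated NodeChildrenChanged deliveries.
        List<String> children = zk.getChildren("/hbase", true);
        System.out.println("Current children of /hbase: " + children);
        Thread.sleep(5000);
        zk.close();
    }
}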
2024-11-09T11:52:00,783 DEBUG [RS:2;3264b4bbda9f:45225 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;3264b4bbda9f:45225 2024-11-09T11:52:00,783 DEBUG [RS:0;3264b4bbda9f:34739 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3264b4bbda9f:34739 2024-11-09T11:52:00,783 DEBUG [RS:1;3264b4bbda9f:39789 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3264b4bbda9f:39789 2024-11-09T11:52:00,787 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T11:52:00,787 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T11:52:00,787 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T11:52:00,788 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T11:52:00,788 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T11:52:00,788 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T11:52:00,788 DEBUG [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T11:52:00,788 DEBUG [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T11:52:00,788 DEBUG [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T11:52:00,790 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(2659): reportForDuty to master=3264b4bbda9f,43655,1731153118776 with port=39789, startcode=1731153119617 2024-11-09T11:52:00,790 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(2659): reportForDuty to master=3264b4bbda9f,43655,1731153118776 with port=45225, startcode=1731153119669 2024-11-09T11:52:00,790 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(2659): reportForDuty to master=3264b4bbda9f,43655,1731153118776 with port=34739, startcode=1731153119511 2024-11-09T11:52:00,788 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3264b4bbda9f,43655,1731153118776 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T11:52:00,795 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3264b4bbda9f:0, corePoolSize=5, maxPoolSize=5 2024-11-09T11:52:00,796 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3264b4bbda9f:0, corePoolSize=5, maxPoolSize=5 2024-11-09T11:52:00,796 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3264b4bbda9f:0, corePoolSize=5, maxPoolSize=5 2024-11-09T11:52:00,796 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3264b4bbda9f:0, 
corePoolSize=5, maxPoolSize=5 2024-11-09T11:52:00,796 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3264b4bbda9f:0, corePoolSize=10, maxPoolSize=10 2024-11-09T11:52:00,796 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,796 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3264b4bbda9f:0, corePoolSize=2, maxPoolSize=2 2024-11-09T11:52:00,797 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,804 DEBUG [RS:2;3264b4bbda9f:45225 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T11:52:00,804 DEBUG [RS:0;3264b4bbda9f:34739 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T11:52:00,804 DEBUG [RS:1;3264b4bbda9f:39789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T11:52:00,804 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T11:52:00,805 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T11:52:00,805 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731153150805 2024-11-09T11:52:00,807 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T11:52:00,808 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T11:52:00,810 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:00,810 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T11:52:00,812 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T11:52:00,812 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T11:52:00,813 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T11:52:00,813 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T11:52:00,816 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:00,822 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T11:52:00,823 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T11:52:00,824 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T11:52:00,824 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:00,824 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
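The FSTableDescriptors entry above writes the hbase:meta descriptor with its four column families (info, ns, rep_barrier, table), each using ROW_INDEX_V1 encoding, ROWCOL bloom filters and in-memory block caching. For a user table the same shape can be built through the public client builder API; the sketch below assumes the HBase 2.x/3.x builders, the table name "demo" is made up, and the family settings mirror the 'info' family logged above.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    static TableDescriptor build() {
        // Column family modelled on the 'info' family in the log:
        // VERSIONS=3, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1,
        // IN_MEMORY=true, BLOCKSIZE=8192.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
        return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo")) // hypothetical table name
                .setColumnFamily(info)
                .build();
    }
}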
2024-11-09T11:52:00,829 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T11:52:00,829 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T11:52:00,833 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.large.0-1731153120830,5,FailOnTimeoutGroup] 2024-11-09T11:52:00,834 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:54646 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:35339:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54646 dst: /127.0.0.1:35339 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:00,836 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.small.0-1731153120833,5,FailOnTimeoutGroup] 2024-11-09T11:52:00,836 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:00,837 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T11:52:00,839 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:00,839 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
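The master is wiring up its background chores here (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner), all of which are ScheduledChore instances run by a ChoreService at a fixed period, as the "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" lines show. A minimal sketch of that pattern follows; ScheduledChore and ChoreService are internal HBase classes, so the chore name and period here are invented purely for illustration.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        // Simple Stoppable so the chore can be told when to stop running.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped = false;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo-chore-service");
        // chore() is invoked every 10 seconds; the cleaners in the log use the
        // same mechanism with much longer periods (e.g. 600000 ms).
        ScheduledChore chore = new ScheduledChore("demoCleaner", stopper, 10_000) {
            @Override
            protected void chore() {
                System.out.println("cleaner pass");
            }
        };
        service.scheduleChore(chore);
        Thread.sleep(30_000);
        stopper.stop("done");
        service.shutdown();
    }
}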
2024-11-09T11:52:00,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-09T11:52:00,848 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:52:00,849 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T11:52:00,849 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52779, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T11:52:00,849 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39459, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T11:52:00,849 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34481, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T11:52:00,850 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934 2024-11-09T11:52:00,856 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43655 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:00,858 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43655 {}] master.ServerManager(517): Registering regionserver=3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:00,863 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): 
Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:00,863 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:00,871 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43655 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3264b4bbda9f,39789,1731153119617 2024-11-09T11:52:00,870 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:53852 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:36503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53852 dst: /127.0.0.1:36503 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-09T11:52:00,871 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43655 {}] master.ServerManager(517): Registering regionserver=3264b4bbda9f,39789,1731153119617 2024-11-09T11:52:00,875 DEBUG [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934 2024-11-09T11:52:00,875 DEBUG [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40827 2024-11-09T11:52:00,875 DEBUG [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T11:52:00,877 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43655 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3264b4bbda9f,45225,1731153119669 2024-11-09T11:52:00,877 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43655 {}] master.ServerManager(517): Registering regionserver=3264b4bbda9f,45225,1731153119669 2024-11-09T11:52:00,877 DEBUG [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934 2024-11-09T11:52:00,877 DEBUG [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40827 2024-11-09T11:52:00,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-09T11:52:00,878 DEBUG [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T11:52:00,879 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
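The DFSStripedOutputStream warnings above ("Cannot allocate parity block ... policy=RS-3-2-1024k" and "Block group <1> failed to write 2 blocks") indicate that the test directory inherits an RS-3-2 erasure coding policy while the mini cluster has too few datanodes to place the parity blocks. One way to inspect or drop the policy on a directory is the DistributedFileSystem API; the sketch below assumes Hadoop 3's erasure coding methods and reuses the namenode address and test-data path from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:40827"); // from the log
        Path dir = new Path("/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934");
        try (FileSystem fs = FileSystem.get(conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            System.out.println("EC policy on " + dir + ": " + policy);
            if (policy != null) {
                // Fall back to plain replication for this subtree, the programmatic
                // counterpart of 'hdfs ec -unsetPolicy -path <dir>'.
                dfs.unsetErasureCodingPolicy(dir);
            }
        }
    }
}

The warning's own hint, 'hdfs ec -verifyClusterSetup', checks the same precondition from the shell: whether the cluster topology can satisfy the enabled policies at all.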
2024-11-09T11:52:00,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:00,882 DEBUG [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934 2024-11-09T11:52:00,882 DEBUG [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40827 2024-11-09T11:52:00,882 DEBUG [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T11:52:00,884 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T11:52:00,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T11:52:00,888 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:00,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T11:52:00,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:00,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T11:52:00,893 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T11:52:00,893 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:00,894 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:00,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T11:52:00,898 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T11:52:00,898 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:00,899 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:00,899 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T11:52:00,902 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T11:52:00,903 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:00,903 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:00,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T11:52:00,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740 2024-11-09T11:52:00,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740 2024-11-09T11:52:00,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T11:52:00,909 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T11:52:00,910 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T11:52:00,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T11:52:00,921 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T11:52:00,922 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71539138, jitterRate=0.06601622700691223}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T11:52:00,927 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731153120882Initializing all the Stores at 1731153120884 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153120884Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153120884Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153120884Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153120884Cleaning up temporary data from old regions at 1731153120909 (+25 ms)Region opened successfully at 1731153120927 (+18 ms) 2024-11-09T11:52:00,927 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T11:52:00,928 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T11:52:00,928 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T11:52:00,928 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T11:52:00,928 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T11:52:00,930 DEBUG [RS:0;3264b4bbda9f:34739 {}] zookeeper.ZKUtil(111): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:00,930 WARN [RS:0;3264b4bbda9f:34739 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T11:52:00,931 DEBUG [RS:2;3264b4bbda9f:45225 {}] zookeeper.ZKUtil(111): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3264b4bbda9f,45225,1731153119669 2024-11-09T11:52:00,931 INFO [RS:0;3264b4bbda9f:34739 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T11:52:00,931 WARN [RS:2;3264b4bbda9f:45225 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T11:52:00,931 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T11:52:00,931 DEBUG [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:00,931 INFO [RS:2;3264b4bbda9f:45225 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T11:52:00,931 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731153120927Disabling compacts and flushes for region at 1731153120927Disabling writes for close at 1731153120928 (+1 ms)Writing region close event to WAL at 1731153120930 (+2 ms)Closed at 1731153120931 (+1 ms) 2024-11-09T11:52:00,931 DEBUG [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,45225,1731153119669 2024-11-09T11:52:00,932 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3264b4bbda9f,39789,1731153119617] 2024-11-09T11:52:00,932 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3264b4bbda9f,34739,1731153119511] 2024-11-09T11:52:00,932 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3264b4bbda9f,45225,1731153119669] 2024-11-09T11:52:00,932 DEBUG [RS:1;3264b4bbda9f:39789 {}] zookeeper.ZKUtil(111): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3264b4bbda9f,39789,1731153119617 2024-11-09T11:52:00,932 WARN [RS:1;3264b4bbda9f:39789 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
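Each region server above logs "Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider", i.e. the asynchronous WAL implementation. The provider is selected through configuration; the sketch below assumes the standard hbase.wal.provider key and its documented values, so verify against your version's defaults before relying on it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" maps to AsyncFSWALProvider (seen in the log above);
        // "filesystem" selects the classic FSHLog-based provider instead.
        conf.set("hbase.wal.provider", "asyncfs");
        System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
    }
}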
2024-11-09T11:52:00,932 INFO [RS:1;3264b4bbda9f:39789 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T11:52:00,933 DEBUG [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,39789,1731153119617 2024-11-09T11:52:00,934 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T11:52:00,934 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T11:52:00,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T11:52:00,950 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T11:52:00,956 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T11:52:00,960 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T11:52:00,960 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T11:52:00,960 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T11:52:00,979 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T11:52:00,979 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T11:52:00,979 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T11:52:00,984 INFO [RS:0;3264b4bbda9f:34739 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T11:52:00,984 INFO [RS:1;3264b4bbda9f:39789 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T11:52:00,984 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-09T11:52:00,985 INFO [RS:2;3264b4bbda9f:45225 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T11:52:00,985 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:00,985 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:00,986 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T11:52:00,986 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T11:52:00,986 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T11:52:00,992 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T11:52:00,992 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T11:52:00,992 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T11:52:00,994 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:00,994 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:00,994 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
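The MemStoreFlusher lines report globalMemStoreLimit=880 M with a low-water mark of 836 M, and the PressureAwareCompactionThroughputController reports a 50-100 MB/s compaction throughput band. Both are derived from configuration; the sketch below lists the knobs that are usually involved, but the key names and values are stated as assumptions and defaults differ by version, so treat it as illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndCompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of heap usable by all memstores, and the lower mark at which
        // forced flushes stop (the 880 M / 836 M figures derive from these).
        conf.setDouble("hbase.regionserver.global.memstore.size", 0.4);
        conf.setDouble("hbase.regionserver.global.memstore.size.lower.limit", 0.95);
        // Compaction throughput band used by the pressure-aware controller.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
    }
}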
2024-11-09T11:52:00,994 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,994 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3264b4bbda9f:0, corePoolSize=2, maxPoolSize=2 2024-11-09T11:52:00,994 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3264b4bbda9f:0, corePoolSize=2, maxPoolSize=2 2024-11-09T11:52:00,995 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3264b4bbda9f:0, 
corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,995 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:00,995 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:00,995 DEBUG [RS:0;3264b4bbda9f:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:00,995 DEBUG [RS:2;3264b4bbda9f:45225 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:00,995 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3264b4bbda9f:0, corePoolSize=2, maxPoolSize=2 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting 
executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,996 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,997 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:00,997 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:00,997 DEBUG [RS:1;3264b4bbda9f:39789 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:01,002 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,002 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,002 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,002 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,002 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,002 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,002 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,34739,1731153119511-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-09T11:52:01,003 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,45225,1731153119669-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,003 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,39789,1731153119617-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T11:52:01,022 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T11:52:01,022 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T11:52:01,022 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T11:52:01,024 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,39789,1731153119617-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,024 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,45225,1731153119669-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,024 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,34739,1731153119511-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,024 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,024 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,024 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,024 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.Replication(171): 3264b4bbda9f,34739,1731153119511 started 2024-11-09T11:52:01,024 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.Replication(171): 3264b4bbda9f,39789,1731153119617 started 2024-11-09T11:52:01,024 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.Replication(171): 3264b4bbda9f,45225,1731153119669 started 2024-11-09T11:52:01,041 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,041 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
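At this point all three region servers have their chores and replication threads running and are about to log "Serving as ..." with their RPC sessions. From a client, the resulting membership is visible through the Admin API; a short sketch assuming the HBase 2.x/3.x ClusterMetrics interface, with connection settings coming from the client's hbase-site.xml rather than anything specific to this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterMembershipSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            ClusterMetrics metrics = admin.getClusterMetrics();
            System.out.println("Master: " + metrics.getMasterName());
            // Would list the servers started above, e.g. 3264b4bbda9f,34739,...
            for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
                System.out.println("Live region server: " + sn);
            }
        }
    }
}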
2024-11-09T11:52:01,041 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(1482): Serving as 3264b4bbda9f,45225,1731153119669, RpcServer on 3264b4bbda9f/172.17.0.2:45225, sessionid=0x1011f703ec90003 2024-11-09T11:52:01,041 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(1482): Serving as 3264b4bbda9f,39789,1731153119617, RpcServer on 3264b4bbda9f/172.17.0.2:39789, sessionid=0x1011f703ec90002 2024-11-09T11:52:01,042 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T11:52:01,042 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T11:52:01,042 DEBUG [RS:2;3264b4bbda9f:45225 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3264b4bbda9f,45225,1731153119669 2024-11-09T11:52:01,042 DEBUG [RS:1;3264b4bbda9f:39789 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3264b4bbda9f,39789,1731153119617 2024-11-09T11:52:01,042 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,042 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,45225,1731153119669' 2024-11-09T11:52:01,042 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,39789,1731153119617' 2024-11-09T11:52:01,042 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(1482): Serving as 3264b4bbda9f,34739,1731153119511, RpcServer on 3264b4bbda9f/172.17.0.2:34739, sessionid=0x1011f703ec90001 2024-11-09T11:52:01,042 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T11:52:01,042 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T11:52:01,042 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T11:52:01,042 DEBUG [RS:0;3264b4bbda9f:34739 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:01,042 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,34739,1731153119511' 2024-11-09T11:52:01,042 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T11:52:01,043 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T11:52:01,043 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T11:52:01,043 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T11:52:01,044 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T11:52:01,044 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.RegionServerProcedureManagerHost(53): Procedure 
flush-table-proc started 2024-11-09T11:52:01,044 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T11:52:01,044 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T11:52:01,044 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T11:52:01,044 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T11:52:01,044 DEBUG [RS:0;3264b4bbda9f:34739 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:01,044 DEBUG [RS:2;3264b4bbda9f:45225 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3264b4bbda9f,45225,1731153119669 2024-11-09T11:52:01,044 DEBUG [RS:1;3264b4bbda9f:39789 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3264b4bbda9f,39789,1731153119617 2024-11-09T11:52:01,044 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,34739,1731153119511' 2024-11-09T11:52:01,044 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,39789,1731153119617' 2024-11-09T11:52:01,044 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,45225,1731153119669' 2024-11-09T11:52:01,044 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T11:52:01,044 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T11:52:01,044 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T11:52:01,045 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T11:52:01,045 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T11:52:01,045 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T11:52:01,045 DEBUG [RS:0;3264b4bbda9f:34739 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T11:52:01,045 INFO [RS:0;3264b4bbda9f:34739 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T11:52:01,045 DEBUG [RS:1;3264b4bbda9f:39789 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T11:52:01,045 INFO [RS:1;3264b4bbda9f:39789 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T11:52:01,045 INFO [RS:0;3264b4bbda9f:34739 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T11:52:01,045 DEBUG [RS:2;3264b4bbda9f:45225 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T11:52:01,045 INFO [RS:1;3264b4bbda9f:39789 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
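The block above shows each region server joining the flush-table and online-snapshot procedure frameworks: it registers itself as a procedure member, checks the ZooKeeper '/hbase/.../abort' node for aborted procedures, and then watches '/hbase/.../acquired' for new ones. As a rough illustration of that child-watch pattern only (not HBase's actual ZKProcedureMemberRpcs code), a minimal ZooKeeper client sketch follows; the quorum address and znode paths come from this log, the session timeout and everything else are assumptions.

import java.util.List;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureMemberSketch {
    public static void main(String[] args) throws Exception {
        // Quorum address as reported elsewhere in this log; the 30s session timeout is an assumption.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60726", 30_000, event -> { });

        // 1. One-off check for aborted procedures, as in "Checking for aborted procedures on node".
        String abortZnode = "/hbase/flush-table-proc/abort";
        if (zk.exists(abortZnode, false) != null) {
            List<String> aborted = zk.getChildren(abortZnode, false);
            System.out.println("Aborted procedures: " + aborted);
        }

        // 2. Watch the 'acquired' znode for new procedures, as in "Looking for new procedures under znode".
        String acquiredZnode = "/hbase/flush-table-proc/acquired";
        List<String> procedures = zk.getChildren(acquiredZnode, event -> {
            if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
                System.out.println("New procedure appeared under " + event.getPath());
            }
        });
        System.out.println("Currently acquired procedures: " + procedures);

        zk.close();
    }
}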
2024-11-09T11:52:01,045 INFO [RS:2;3264b4bbda9f:45225 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T11:52:01,046 INFO [RS:2;3264b4bbda9f:45225 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T11:52:01,107 WARN [3264b4bbda9f:43655 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-09T11:52:01,156 INFO [RS:1;3264b4bbda9f:39789 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T11:52:01,156 INFO [RS:2;3264b4bbda9f:45225 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T11:52:01,156 INFO [RS:0;3264b4bbda9f:34739 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T11:52:01,159 INFO [RS:2;3264b4bbda9f:45225 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C45225%2C1731153119669, suffix=, logDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,45225,1731153119669, archiveDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/oldWALs, maxLogs=32 2024-11-09T11:52:01,159 INFO [RS:1;3264b4bbda9f:39789 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C39789%2C1731153119617, suffix=, logDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,39789,1731153119617, archiveDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/oldWALs, maxLogs=32 2024-11-09T11:52:01,159 INFO [RS:0;3264b4bbda9f:34739 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C34739%2C1731153119511, suffix=, logDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,34739,1731153119511, archiveDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/oldWALs, maxLogs=32 2024-11-09T11:52:01,174 DEBUG [RS:2;3264b4bbda9f:45225 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,45225,1731153119669/3264b4bbda9f%2C45225%2C1731153119669.1731153121162, exclude list is [], retry=0 2024-11-09T11:52:01,175 DEBUG [RS:0;3264b4bbda9f:34739 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,34739,1731153119511/3264b4bbda9f%2C34739%2C1731153119511.1731153121162, exclude list is [], retry=0 2024-11-09T11:52:01,177 DEBUG [RS:1;3264b4bbda9f:39789 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,39789,1731153119617/3264b4bbda9f%2C39789%2C1731153119617.1731153121162, exclude list is [], retry=0 2024-11-09T11:52:01,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35339,DS-de141fe7-366e-4fb2-a377-2f80327ce555,DISK] 2024-11-09T11:52:01,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client 
skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41587,DS-84133049-5905-4d93-82cd-c73362bee7d6,DISK] 2024-11-09T11:52:01,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36503,DS-2f607617-3931-4624-8370-72a7a69ee114,DISK] 2024-11-09T11:52:01,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41587,DS-84133049-5905-4d93-82cd-c73362bee7d6,DISK] 2024-11-09T11:52:01,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35339,DS-de141fe7-366e-4fb2-a377-2f80327ce555,DISK] 2024-11-09T11:52:01,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36503,DS-2f607617-3931-4624-8370-72a7a69ee114,DISK] 2024-11-09T11:52:01,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35339,DS-de141fe7-366e-4fb2-a377-2f80327ce555,DISK] 2024-11-09T11:52:01,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41587,DS-84133049-5905-4d93-82cd-c73362bee7d6,DISK] 2024-11-09T11:52:01,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36503,DS-2f607617-3931-4624-8370-72a7a69ee114,DISK] 2024-11-09T11:52:01,222 INFO [RS:2;3264b4bbda9f:45225 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,45225,1731153119669/3264b4bbda9f%2C45225%2C1731153119669.1731153121162 2024-11-09T11:52:01,222 DEBUG [RS:2;3264b4bbda9f:45225 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36515:36515),(127.0.0.1/127.0.0.1:46339:46339),(127.0.0.1/127.0.0.1:45733:45733)] 2024-11-09T11:52:01,224 INFO [RS:0;3264b4bbda9f:34739 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,34739,1731153119511/3264b4bbda9f%2C34739%2C1731153119511.1731153121162 2024-11-09T11:52:01,224 INFO [RS:1;3264b4bbda9f:39789 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,39789,1731153119617/3264b4bbda9f%2C39789%2C1731153119617.1731153121162 2024-11-09T11:52:01,228 DEBUG [RS:0;3264b4bbda9f:34739 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:36515:36515),(127.0.0.1/127.0.0.1:46339:46339),(127.0.0.1/127.0.0.1:45733:45733)] 2024-11-09T11:52:01,228 DEBUG [RS:1;3264b4bbda9f:39789 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36515:36515),(127.0.0.1/127.0.0.1:45733:45733),(127.0.0.1/127.0.0.1:46339:46339)] 2024-11-09T11:52:01,359 DEBUG [3264b4bbda9f:43655 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T11:52:01,366 DEBUG [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(204): Hosts are {3264b4bbda9f=0} racks are {/default-rack=0} 2024-11-09T11:52:01,373 DEBUG [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T11:52:01,373 DEBUG [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T11:52:01,373 DEBUG [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T11:52:01,373 DEBUG [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T11:52:01,373 DEBUG [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T11:52:01,373 DEBUG [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T11:52:01,373 INFO [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T11:52:01,373 INFO [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T11:52:01,373 INFO [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T11:52:01,373 DEBUG [3264b4bbda9f:43655 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T11:52:01,380 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:01,387 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3264b4bbda9f,34739,1731153119511, state=OPENING 2024-11-09T11:52:01,432 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T11:52:01,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:01,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:01,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:01,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:01,444 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:01,444 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:01,444 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:01,444 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:01,446 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T11:52:01,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3264b4bbda9f,34739,1731153119511}] 2024-11-09T11:52:01,628 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T11:52:01,630 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43651, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T11:52:01,643 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T11:52:01,643 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T11:52:01,644 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-09T11:52:01,647 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C34739%2C1731153119511.meta, suffix=.meta, logDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,34739,1731153119511, archiveDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/oldWALs, maxLogs=32 2024-11-09T11:52:01,663 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,34739,1731153119511/3264b4bbda9f%2C34739%2C1731153119511.meta.1731153121650.meta, exclude list is [], retry=0 2024-11-09T11:52:01,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41587,DS-84133049-5905-4d93-82cd-c73362bee7d6,DISK] 2024-11-09T11:52:01,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36503,DS-2f607617-3931-4624-8370-72a7a69ee114,DISK] 2024-11-09T11:52:01,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35339,DS-de141fe7-366e-4fb2-a377-2f80327ce555,DISK] 2024-11-09T11:52:01,671 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/WALs/3264b4bbda9f,34739,1731153119511/3264b4bbda9f%2C34739%2C1731153119511.meta.1731153121650.meta 2024-11-09T11:52:01,671 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45733:45733),(127.0.0.1/127.0.0.1:46339:46339),(127.0.0.1/127.0.0.1:36515:36515)] 2024-11-09T11:52:01,671 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T11:52:01,673 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T11:52:01,675 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T11:52:01,679 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-09T11:52:01,683 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T11:52:01,683 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:01,683 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T11:52:01,683 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T11:52:01,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T11:52:01,688 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T11:52:01,688 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:01,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:01,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T11:52:01,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T11:52:01,691 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:01,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:01,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T11:52:01,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T11:52:01,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:01,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:01,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T11:52:01,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T11:52:01,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:01,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-09T11:52:01,696 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T11:52:01,698 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740 2024-11-09T11:52:01,700 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740 2024-11-09T11:52:01,703 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T11:52:01,703 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T11:52:01,704 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T11:52:01,707 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T11:52:01,709 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67628005, jitterRate=0.007735803723335266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T11:52:01,709 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T11:52:01,711 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731153121684Writing region info on filesystem at 1731153121684Initializing all the Stores at 1731153121686 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153121686Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153121686Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153121686Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153121686Cleaning up temporary data from old regions at 1731153121703 (+17 ms)Running coprocessor post-open hooks at 1731153121709 (+6 ms)Region opened successfully at 1731153121711 (+2 ms) 2024-11-09T11:52:01,718 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731153121621 2024-11-09T11:52:01,730 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T11:52:01,730 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T11:52:01,732 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:01,734 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3264b4bbda9f,34739,1731153119511, state=OPEN 2024-11-09T11:52:01,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T11:52:01,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T11:52:01,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T11:52:01,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T11:52:01,790 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:01,790 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:01,790 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:01,790 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:01,791 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:01,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T11:52:01,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3264b4bbda9f,34739,1731153119511 in 343 msec 2024-11-09T11:52:01,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T11:52:01,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 859 msec 2024-11-09T11:52:01,807 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T11:52:01,807 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T11:52:01,824 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T11:52:01,825 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3264b4bbda9f,34739,1731153119511, seqNum=-1] 2024-11-09T11:52:01,863 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T11:52:01,865 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48219, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T11:52:01,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1700 sec 2024-11-09T11:52:01,897 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731153121897, completionTime=-1 2024-11-09T11:52:01,900 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T11:52:01,900 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-09T11:52:01,926 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-09T11:52:01,926 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731153181926 2024-11-09T11:52:01,926 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731153241926 2024-11-09T11:52:01,927 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 26 msec 2024-11-09T11:52:01,928 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T11:52:01,935 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,43655,1731153118776-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,935 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,43655,1731153118776-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,935 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,43655,1731153118776-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,937 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3264b4bbda9f:43655, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,937 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,938 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:01,944 DEBUG [master/3264b4bbda9f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T11:52:01,966 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.182sec 2024-11-09T11:52:01,968 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T11:52:01,969 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T11:52:01,969 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T11:52:01,970 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-09T11:52:01,970 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T11:52:01,970 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,43655,1731153118776-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T11:52:01,971 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,43655,1731153118776-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T11:52:01,975 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T11:52:01,975 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T11:52:01,976 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,43655,1731153118776-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:02,023 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7924b1b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T11:52:02,027 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-09T11:52:02,027 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-09T11:52:02,031 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3264b4bbda9f,43655,-1 for getting cluster id 2024-11-09T11:52:02,033 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T11:52:02,041 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8fdf4288-ebe5-4288-a8e5-08757ab9ee81' 2024-11-09T11:52:02,043 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T11:52:02,044 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8fdf4288-ebe5-4288-a8e5-08757ab9ee81" 2024-11-09T11:52:02,045 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f659864, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T11:52:02,045 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3264b4bbda9f,43655,-1] 2024-11-09T11:52:02,048 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T11:52:02,050 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:02,051 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46916, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-11-09T11:52:02,054 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9215b26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T11:52:02,054 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T11:52:02,061 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3264b4bbda9f,34739,1731153119511, seqNum=-1] 2024-11-09T11:52:02,061 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T11:52:02,064 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40650, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T11:52:02,083 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3264b4bbda9f,43655,1731153118776 2024-11-09T11:52:02,087 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T11:52:02,091 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3264b4bbda9f,43655,1731153118776 2024-11-09T11:52:02,095 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3915b32d 2024-11-09T11:52:02,096 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T11:52:02,099 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46920, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T11:52:02,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T11:52:02,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T11:52:02,116 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T11:52:02,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-09T11:52:02,119 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
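For context, the create request logged above ('TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family keeping one version) is what the test client submits to the master, which stores it as CreateTableProcedure pid=4. A roughly equivalent request built with the public HBase client API could look like the sketch below; only the table name, family name, region replication, and max versions are taken from the logged descriptor, while the connection setup is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestHBaseWalOnEC {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml points at the test cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Mirrors the logged descriptor: REGION_REPLICATION => '1', family 'cf' with VERSIONS => '1'.
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                .setRegionReplication(1)
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)
                    .build())
                .build());
        }
    }
}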
2024-11-09T11:52:02,122 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T11:52:02,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T11:52:02,130 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:02,130 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:02,134 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:38044 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:36503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38044 dst: /127.0.0.1:36503 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:02,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-09T11:52:02,139 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
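The two 'Cannot allocate parity block' warnings and the 'failed to write 2 blocks' line are consistent with the striping math for the RS-3-2-1024k policy named in the log: each block group needs one DataNode per internal block, while this mini cluster runs only the three DataNodes seen earlier (127.0.0.1:35339, 127.0.0.1:41587, 127.0.0.1:36503). A back-of-the-envelope check, assuming the standard reading of RS-3-2 as 3 data plus 2 parity blocks:

k_{\text{data}} + m_{\text{parity}} = 3 + 2 = 5 \;>\; 3 \text{ DataNodes available} \;\Rightarrow\; 2 \text{ internal blocks cannot be placed}

That matches the failed parity indices 3 and 4, and it is what the quoted 'hdfs ec -verifyClusterSetup' command is meant to surface.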
2024-11-09T11:52:02,142 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1b87390fafabb863bdf354f3814d38fe, NAME => 'TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934 2024-11-09T11:52:02,149 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:02,149 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:02,155 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:53712 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35339:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53712 dst: /127.0.0.1:35339 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:02,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-09T11:52:02,161 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T11:52:02,162 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:02,162 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 1b87390fafabb863bdf354f3814d38fe, disabling compactions & flushes 2024-11-09T11:52:02,162 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:02,162 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:02,162 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. after waiting 0 ms 2024-11-09T11:52:02,162 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:02,162 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:02,162 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1b87390fafabb863bdf354f3814d38fe: Waiting for close lock at 1731153122162Disabling compacts and flushes for region at 1731153122162Disabling writes for close at 1731153122162Writing region close event to WAL at 1731153122162Closed at 1731153122162 2024-11-09T11:52:02,164 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T11:52:02,169 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731153122164"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731153122164"}]},"ts":"1731153122164"} 2024-11-09T11:52:02,173 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-09T11:52:02,175 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T11:52:02,179 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731153122176"}]},"ts":"1731153122176"} 2024-11-09T11:52:02,183 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T11:52:02,184 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3264b4bbda9f=0} racks are {/default-rack=0} 2024-11-09T11:52:02,185 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T11:52:02,185 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T11:52:02,185 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T11:52:02,186 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T11:52:02,186 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T11:52:02,186 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T11:52:02,186 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T11:52:02,186 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T11:52:02,186 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T11:52:02,186 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T11:52:02,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1b87390fafabb863bdf354f3814d38fe, ASSIGN}] 2024-11-09T11:52:02,191 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1b87390fafabb863bdf354f3814d38fe, ASSIGN 2024-11-09T11:52:02,193 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1b87390fafabb863bdf354f3814d38fe, ASSIGN; state=OFFLINE, location=3264b4bbda9f,34739,1731153119511; forceNewPlan=false, retain=false 2024-11-09T11:52:02,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T11:52:02,347 INFO [3264b4bbda9f:43655 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-09T11:52:02,349 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1b87390fafabb863bdf354f3814d38fe, regionState=OPENING, regionLocation=3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:02,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1b87390fafabb863bdf354f3814d38fe, ASSIGN because future has completed 2024-11-09T11:52:02,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1b87390fafabb863bdf354f3814d38fe, server=3264b4bbda9f,34739,1731153119511}] 2024-11-09T11:52:02,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T11:52:02,521 INFO [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:02,521 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1b87390fafabb863bdf354f3814d38fe, NAME => 'TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe.', STARTKEY => '', ENDKEY => ''} 2024-11-09T11:52:02,522 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,522 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:02,522 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,523 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,525 INFO [StoreOpener-1b87390fafabb863bdf354f3814d38fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,527 INFO [StoreOpener-1b87390fafabb863bdf354f3814d38fe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b87390fafabb863bdf354f3814d38fe columnFamilyName cf 2024-11-09T11:52:02,527 DEBUG [StoreOpener-1b87390fafabb863bdf354f3814d38fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:02,528 INFO [StoreOpener-1b87390fafabb863bdf354f3814d38fe-1 {}] regionserver.HStore(327): Store=1b87390fafabb863bdf354f3814d38fe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T11:52:02,528 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,530 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/default/TestHBaseWalOnEC/1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,530 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/default/TestHBaseWalOnEC/1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,531 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,531 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,533 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,538 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/default/TestHBaseWalOnEC/1b87390fafabb863bdf354f3814d38fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T11:52:02,539 INFO [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1b87390fafabb863bdf354f3814d38fe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74660707, jitterRate=0.11253122985363007}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T11:52:02,539 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:02,541 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1b87390fafabb863bdf354f3814d38fe: Running coprocessor pre-open hook at 1731153122523Writing region info on filesystem at 1731153122523Initializing all the Stores at 1731153122525 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153122525Cleaning up temporary data from old regions at 1731153122531 (+6 ms)Running coprocessor post-open hooks at 1731153122539 (+8 ms)Region opened successfully at 1731153122540 (+1 ms) 2024-11-09T11:52:02,543 INFO [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe., pid=6, masterSystemTime=1731153122511 2024-11-09T11:52:02,546 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:02,546 INFO [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:02,547 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1b87390fafabb863bdf354f3814d38fe, regionState=OPEN, openSeqNum=2, regionLocation=3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:02,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1b87390fafabb863bdf354f3814d38fe, server=3264b4bbda9f,34739,1731153119511 because future has completed 2024-11-09T11:52:02,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T11:52:02,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1b87390fafabb863bdf354f3814d38fe, server=3264b4bbda9f,34739,1731153119511 in 196 msec 2024-11-09T11:52:02,562 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T11:52:02,562 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1b87390fafabb863bdf354f3814d38fe, ASSIGN in 369 msec 2024-11-09T11:52:02,563 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T11:52:02,564 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731153122564"}]},"ts":"1731153122564"} 2024-11-09T11:52:02,567 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T11:52:02,569 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T11:52:02,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 461 msec 
2024-11-09T11:52:02,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T11:52:02,760 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T11:52:02,760 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T11:52:02,761 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T11:52:02,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T11:52:02,767 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T11:52:02,767 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-09T11:52:02,775 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe., hostname=3264b4bbda9f,34739,1731153119511, seqNum=2] 2024-11-09T11:52:02,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-09T11:52:02,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T11:52:02,790 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T11:52:02,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T11:52:02,792 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T11:52:02,793 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T11:52:02,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T11:52:02,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34739 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T11:52:02,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 
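The flush above originates on the client side: a single small cell is written to the table and an admin flush is requested, which the master stores as a FlushTableProcedure (pid=7) and fans out as a FlushRegionProcedure (pid=8) to the hosting region server. A hedged sketch of that client sequence, using the row 'row' located above and the 'cf:cq' cell named in the HFile writer line that follows (the value and the connection setup are assumed):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      // 'conn' is an already-open Connection to the mini cluster (creation omitted).
      static void putAndFlush(Connection conn) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
          // Row key, family and qualifier match the flushed cell: "key is row/cf:cq/.../Put".
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          admin.flush(name); // triggers the flush procedures seen in the surrounding log lines
        }
      }
    }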
2024-11-09T11:52:02,962 INFO [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 1b87390fafabb863bdf354f3814d38fe 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T11:52:03,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/default/TestHBaseWalOnEC/1b87390fafabb863bdf354f3814d38fe/.tmp/cf/1b481ec399404382b24563b9850bb486 is 36, key is row/cf:cq/1731153122777/Put/seqid=0 2024-11-09T11:52:03,031 WARN [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,031 WARN [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,038 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2022602906_22 at /127.0.0.1:38058 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38058 dst: /127.0.0.1:36503 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:03,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-09T11:52:03,045 WARN [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T11:52:03,046 INFO [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/default/TestHBaseWalOnEC/1b87390fafabb863bdf354f3814d38fe/.tmp/cf/1b481ec399404382b24563b9850bb486 2024-11-09T11:52:03,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/default/TestHBaseWalOnEC/1b87390fafabb863bdf354f3814d38fe/.tmp/cf/1b481ec399404382b24563b9850bb486 as hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/default/TestHBaseWalOnEC/1b87390fafabb863bdf354f3814d38fe/cf/1b481ec399404382b24563b9850bb486 2024-11-09T11:52:03,098 INFO [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/default/TestHBaseWalOnEC/1b87390fafabb863bdf354f3814d38fe/cf/1b481ec399404382b24563b9850bb486, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T11:52:03,104 INFO [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 1b87390fafabb863bdf354f3814d38fe in 142ms, sequenceid=5, compaction requested=false 2024-11-09T11:52:03,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-09T11:52:03,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 1b87390fafabb863bdf354f3814d38fe: 2024-11-09T11:52:03,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 
2024-11-09T11:52:03,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T11:52:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T11:52:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T11:52:03,117 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T11:52:03,118 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 320 msec 2024-11-09T11:52:03,121 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 334 msec 2024-11-09T11:52:03,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T11:52:03,422 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T11:52:03,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T11:52:03,440 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T11:52:03,440 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:03,444 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:03,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:03,445 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
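The shutdown that follows is driven by the test's teardown: the call stack above shows TestHBaseWalOnEC.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection and then stops the master, the three region servers and the mini DFS cluster. A minimal hedged sketch of such a teardown hook (field name and annotation placement assumed; only the shutdownMiniCluster call is taken from the stack trace):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      // Assumed to be the shared test utility that started the mini cluster earlier.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Produces the "Shutting down minicluster" line above and the
        // "STOPPING region server" lines that follow.
        UTIL.shutdownMiniCluster();
      }
    }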
2024-11-09T11:52:03,445 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T11:52:03,445 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1720678036, stopped=false 2024-11-09T11:52:03,445 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3264b4bbda9f,43655,1731153118776 2024-11-09T11:52:03,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:03,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:03,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:03,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:03,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:03,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:03,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:03,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:03,518 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T11:52:03,519 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
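The ZooKeeper events above show how the shutdown request is broadcast: the master deletes the /hbase/running znode, every server's ZKWatcher receives NodeDeleted for that path, and each one then re-registers a watcher on the now-missing node (the "Set watcher on znode that does not yet exist" lines that follow) before beginning to stop. As a loose illustration only, a plain ZooKeeper client can observe the same signal; the quorum address and base znode are taken from the ZKWatcher lines, everything else is assumed:

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class RunningZNodeCheckSketch {
      public static void main(String[] args) throws Exception {
        // Quorum as reported by ZKWatcher: 127.0.0.1:60726, baseZNode=/hbase.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60726", 30_000, event -> { });
        try {
          Stat stat = zk.exists("/hbase/running", false);
          System.out.println(stat == null
              ? "/hbase/running is absent: cluster shutdown has been requested"
              : "/hbase/running exists: cluster is running");
        } finally {
          zk.close();
        }
      }
    }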
2024-11-09T11:52:03,519 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:03,519 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:03,519 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:03,519 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:03,520 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:03,520 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:03,521 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3264b4bbda9f,45225,1731153119669' ***** 2024-11-09T11:52:03,521 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(2210): STOPPED: Exiting; cluster shutdown set and not carrying any regions 2024-11-09T11:52:03,521 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3264b4bbda9f,39789,1731153119617' ***** 2024-11-09T11:52:03,521 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3264b4bbda9f,34739,1731153119511' ***** 2024-11-09T11:52:03,521 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(2210): STOPPED: Exiting; cluster shutdown set and not carrying any regions 2024-11-09T11:52:03,521 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T11:52:03,522 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T11:52:03,523 INFO [RS:0;3264b4bbda9f:34739 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T11:52:03,523 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T11:52:03,523 INFO [RS:0;3264b4bbda9f:34739 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T11:52:03,523 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(3091): Received CLOSE for 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:03,524 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T11:52:03,524 INFO [RS:2;3264b4bbda9f:45225 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T11:52:03,524 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T11:52:03,524 INFO [RS:2;3264b4bbda9f:45225 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-09T11:52:03,524 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(959): stopping server 3264b4bbda9f,45225,1731153119669 2024-11-09T11:52:03,524 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T11:52:03,524 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T11:52:03,524 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(959): stopping server 3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:03,524 INFO [RS:2;3264b4bbda9f:45225 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;3264b4bbda9f:45225. 2024-11-09T11:52:03,524 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T11:52:03,524 INFO [RS:1;3264b4bbda9f:39789 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T11:52:03,524 INFO [RS:0;3264b4bbda9f:34739 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3264b4bbda9f:34739. 2024-11-09T11:52:03,524 INFO [RS:1;3264b4bbda9f:39789 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T11:52:03,524 DEBUG [RS:2;3264b4bbda9f:45225 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:03,524 DEBUG [RS:2;3264b4bbda9f:45225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:03,524 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(959): stopping server 3264b4bbda9f,39789,1731153119617 2024-11-09T11:52:03,525 DEBUG [RS:0;3264b4bbda9f:34739 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) 
at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:03,525 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T11:52:03,525 DEBUG [RS:0;3264b4bbda9f:34739 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:03,525 INFO [RS:1;3264b4bbda9f:39789 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3264b4bbda9f:39789. 2024-11-09T11:52:03,525 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(976): stopping server 3264b4bbda9f,45225,1731153119669; all regions closed. 2024-11-09T11:52:03,525 DEBUG [RS:1;3264b4bbda9f:39789 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:03,525 DEBUG [RS:1;3264b4bbda9f:39789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:03,525 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1b87390fafabb863bdf354f3814d38fe, disabling compactions & flushes 2024-11-09T11:52:03,525 INFO [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 
2024-11-09T11:52:03,525 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T11:52:03,525 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:03,525 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. after waiting 0 ms 2024-11-09T11:52:03,525 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T11:52:03,525 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:03,525 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(976): stopping server 3264b4bbda9f,39789,1731153119617; all regions closed. 2024-11-09T11:52:03,525 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T11:52:03,525 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T11:52:03,525 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T11:52:03,526 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-09T11:52:03,526 DEBUG [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 1b87390fafabb863bdf354f3814d38fe=TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe.} 2024-11-09T11:52:03,526 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T11:52:03,526 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T11:52:03,526 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T11:52:03,526 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T11:52:03,526 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T11:52:03,527 DEBUG [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1b87390fafabb863bdf354f3814d38fe 2024-11-09T11:52:03,527 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T11:52:03,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_1073741828_1018 (size=93) 2024-11-09T11:52:03,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_1073741828_1018 (size=93) 
2024-11-09T11:52:03,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_1073741828_1018 (size=93) 2024-11-09T11:52:03,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_1073741826_1016 (size=93) 2024-11-09T11:52:03,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_1073741826_1016 (size=93) 2024-11-09T11:52:03,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_1073741826_1016 (size=93) 2024-11-09T11:52:03,542 DEBUG [RS:2;3264b4bbda9f:45225 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/oldWALs 2024-11-09T11:52:03,542 INFO [RS:2;3264b4bbda9f:45225 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3264b4bbda9f%2C45225%2C1731153119669:(num 1731153121162) 2024-11-09T11:52:03,542 DEBUG [RS:2;3264b4bbda9f:45225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:03,542 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:03,543 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T11:52:03,543 DEBUG [RS:1;3264b4bbda9f:39789 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/oldWALs 2024-11-09T11:52:03,543 INFO [RS:1;3264b4bbda9f:39789 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3264b4bbda9f%2C39789%2C1731153119617:(num 1731153121162) 2024-11-09T11:52:03,543 DEBUG [RS:1;3264b4bbda9f:39789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:03,543 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:03,543 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T11:52:03,543 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.ChoreService(370): Chore service for: regionserver/3264b4bbda9f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T11:52:03,543 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T11:52:03,543 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.ChoreService(370): Chore service for: regionserver/3264b4bbda9f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T11:52:03,543 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T11:52:03,543 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T11:52:03,543 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T11:52:03,543 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-09T11:52:03,543 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T11:52:03,543 INFO [regionserver/3264b4bbda9f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T11:52:03,543 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T11:52:03,543 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T11:52:03,544 INFO [RS:2;3264b4bbda9f:45225 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45225 2024-11-09T11:52:03,544 INFO [RS:1;3264b4bbda9f:39789 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39789 2024-11-09T11:52:03,545 INFO [regionserver/3264b4bbda9f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T11:52:03,547 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/default/TestHBaseWalOnEC/1b87390fafabb863bdf354f3814d38fe/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T11:52:03,550 INFO [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:03,550 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1b87390fafabb863bdf354f3814d38fe: Waiting for close lock at 1731153123524Running coprocessor pre-close hooks at 1731153123525 (+1 ms)Disabling compacts and flushes for region at 1731153123525Disabling writes for close at 1731153123525Writing region close event to WAL at 1731153123529 (+4 ms)Running coprocessor post-close hooks at 1731153123548 (+19 ms)Closed at 1731153123550 (+2 ms) 2024-11-09T11:52:03,550 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe. 2024-11-09T11:52:03,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3264b4bbda9f,39789,1731153119617 2024-11-09T11:52:03,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T11:52:03,558 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T11:52:03,562 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/.tmp/info/3bf6c2de08694f439072985363288779 is 153, key is TestHBaseWalOnEC,,1731153122100.1b87390fafabb863bdf354f3814d38fe./info:regioninfo/1731153122547/Put/seqid=0 2024-11-09T11:52:03,565 WARN [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,565 WARN [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,569 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T11:52:03,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3264b4bbda9f,45225,1731153119669 2024-11-09T11:52:03,569 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3264b4bbda9f,39789,1731153119617] 2024-11-09T11:52:03,569 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$377/0x00007fe7648f8748@45669fee rejected from java.util.concurrent.ThreadPoolExecutor@640926[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-09T11:52:03,570 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2022602906_22 at /127.0.0.1:53730 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:35339:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53730 dst: /127.0.0.1:35339 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:03,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-09T11:52:03,574 WARN [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:52:03,574 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/.tmp/info/3bf6c2de08694f439072985363288779 2024-11-09T11:52:03,590 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3264b4bbda9f,39789,1731153119617 already deleted, retry=false 2024-11-09T11:52:03,590 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3264b4bbda9f,39789,1731153119617 expired; onlineServers=2 2024-11-09T11:52:03,590 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3264b4bbda9f,45225,1731153119669] 2024-11-09T11:52:03,600 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3264b4bbda9f,45225,1731153119669 already deleted, retry=false 2024-11-09T11:52:03,600 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3264b4bbda9f,45225,1731153119669 expired; onlineServers=1 2024-11-09T11:52:03,602 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/.tmp/ns/59b812defa9540c3bebf30a401ded7a3 is 43, key is default/ns:d/1731153121871/Put/seqid=0 2024-11-09T11:52:03,604 WARN [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,604 WARN [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T11:52:03,606 INFO [regionserver/3264b4bbda9f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:03,606 INFO [regionserver/3264b4bbda9f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:03,606 INFO [regionserver/3264b4bbda9f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:03,608 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2022602906_22 at /127.0.0.1:38072 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:36503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38072 dst: /127.0.0.1:36503 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:03,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-09T11:52:03,612 WARN [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:52:03,613 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/.tmp/ns/59b812defa9540c3bebf30a401ded7a3 2024-11-09T11:52:03,641 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/.tmp/table/2a03452d34af496bb15111f3b57d3937 is 52, key is TestHBaseWalOnEC/table:state/1731153122564/Put/seqid=0 2024-11-09T11:52:03,643 WARN [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T11:52:03,643 WARN [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,646 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2022602906_22 at /127.0.0.1:38092 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:36503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38092 dst: /127.0.0.1:36503 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:03,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-09T11:52:03,650 WARN [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T11:52:03,651 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/.tmp/table/2a03452d34af496bb15111f3b57d3937 2024-11-09T11:52:03,661 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/.tmp/info/3bf6c2de08694f439072985363288779 as hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/info/3bf6c2de08694f439072985363288779 2024-11-09T11:52:03,673 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/info/3bf6c2de08694f439072985363288779, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T11:52:03,675 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/.tmp/ns/59b812defa9540c3bebf30a401ded7a3 as hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/ns/59b812defa9540c3bebf30a401ded7a3 2024-11-09T11:52:03,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:03,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39789-0x1011f703ec90002, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:03,680 INFO [RS:1;3264b4bbda9f:39789 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T11:52:03,680 INFO [RS:1;3264b4bbda9f:39789 {}] regionserver.HRegionServer(1031): Exiting; stopping=3264b4bbda9f,39789,1731153119617; zookeeper connection closed. 
2024-11-09T11:52:03,680 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@63d0df3a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@63d0df3a 2024-11-09T11:52:03,686 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/ns/59b812defa9540c3bebf30a401ded7a3, entries=2, sequenceid=11, filesize=5.0 K 2024-11-09T11:52:03,687 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/.tmp/table/2a03452d34af496bb15111f3b57d3937 as hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/table/2a03452d34af496bb15111f3b57d3937 2024-11-09T11:52:03,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:03,690 INFO [RS:2;3264b4bbda9f:45225 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T11:52:03,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45225-0x1011f703ec90003, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:03,690 INFO [RS:2;3264b4bbda9f:45225 {}] regionserver.HRegionServer(1031): Exiting; stopping=3264b4bbda9f,45225,1731153119669; zookeeper connection closed. 2024-11-09T11:52:03,690 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@60c9ee3c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@60c9ee3c 2024-11-09T11:52:03,699 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/table/2a03452d34af496bb15111f3b57d3937, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T11:52:03,701 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 174ms, sequenceid=11, compaction requested=false 2024-11-09T11:52:03,701 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-09T11:52:03,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-09T11:52:03,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-09T11:52:03,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-09T11:52:03,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to 
blk_-9223372036854775725_1010 (size=34) 2024-11-09T11:52:03,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-09T11:52:03,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-09T11:52:03,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-09T11:52:03,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-09T11:52:03,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-09T11:52:03,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-09T11:52:03,718 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T11:52:03,719 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T11:52:03,719 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T11:52:03,719 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731153123526Running coprocessor pre-close hooks at 1731153123526Disabling compacts and flushes for region at 1731153123526Disabling writes for close at 1731153123526Obtaining lock to block concurrent updates at 1731153123527 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731153123527Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731153123528 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731153123529 (+1 ms)Flushing 1588230740/info: creating writer at 1731153123529Flushing 1588230740/info: appending metadata at 1731153123557 (+28 ms)Flushing 1588230740/info: closing flushed file at 1731153123557Flushing 1588230740/ns: creating writer at 1731153123584 (+27 ms)Flushing 1588230740/ns: appending metadata at 1731153123600 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731153123600Flushing 1588230740/table: creating writer at 1731153123622 (+22 ms)Flushing 1588230740/table: appending metadata at 1731153123639 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731153123639Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@178ed33a: reopening flushed file at 1731153123660 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b6ab23c: reopening flushed file at 1731153123673 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7aa7047b: reopening flushed file at 1731153123686 
(+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 174ms, sequenceid=11, compaction requested=false at 1731153123701 (+15 ms)Writing region close event to WAL at 1731153123707 (+6 ms)Running coprocessor post-close hooks at 1731153123719 (+12 ms)Closed at 1731153123719 2024-11-09T11:52:03,720 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T11:52:03,727 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(976): stopping server 3264b4bbda9f,34739,1731153119511; all regions closed. 2024-11-09T11:52:03,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_1073741829_1019 (size=2751) 2024-11-09T11:52:03,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_1073741829_1019 (size=2751) 2024-11-09T11:52:03,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_1073741829_1019 (size=2751) 2024-11-09T11:52:03,733 DEBUG [RS:0;3264b4bbda9f:34739 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/oldWALs 2024-11-09T11:52:03,733 INFO [RS:0;3264b4bbda9f:34739 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3264b4bbda9f%2C34739%2C1731153119511.meta:.meta(num 1731153121650) 2024-11-09T11:52:03,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_1073741827_1017 (size=1298) 2024-11-09T11:52:03,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_1073741827_1017 (size=1298) 2024-11-09T11:52:03,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_1073741827_1017 (size=1298) 2024-11-09T11:52:03,740 DEBUG [RS:0;3264b4bbda9f:34739 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/oldWALs 2024-11-09T11:52:03,740 INFO [RS:0;3264b4bbda9f:34739 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3264b4bbda9f%2C34739%2C1731153119511:(num 1731153121162) 2024-11-09T11:52:03,740 DEBUG [RS:0;3264b4bbda9f:34739 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:03,740 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:03,740 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T11:52:03,741 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.ChoreService(370): Chore service for: regionserver/3264b4bbda9f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T11:52:03,741 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T11:52:03,741 INFO [regionserver/3264b4bbda9f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T11:52:03,741 INFO [RS:0;3264b4bbda9f:34739 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34739 2024-11-09T11:52:03,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T11:52:03,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3264b4bbda9f,34739,1731153119511 2024-11-09T11:52:03,751 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T11:52:03,762 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3264b4bbda9f,34739,1731153119511] 2024-11-09T11:52:03,772 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3264b4bbda9f,34739,1731153119511 already deleted, retry=false 2024-11-09T11:52:03,772 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3264b4bbda9f,34739,1731153119511 expired; onlineServers=0 2024-11-09T11:52:03,772 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3264b4bbda9f,43655,1731153118776' ***** 2024-11-09T11:52:03,772 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T11:52:03,773 INFO [M:0;3264b4bbda9f:43655 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T11:52:03,773 INFO [M:0;3264b4bbda9f:43655 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T11:52:03,773 DEBUG [M:0;3264b4bbda9f:43655 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T11:52:03,773 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-09T11:52:03,773 DEBUG [M:0;3264b4bbda9f:43655 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T11:52:03,773 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.small.0-1731153120833 {}] cleaner.HFileCleaner(306): Exit Thread[master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.small.0-1731153120833,5,FailOnTimeoutGroup] 2024-11-09T11:52:03,773 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.large.0-1731153120830 {}] cleaner.HFileCleaner(306): Exit Thread[master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.large.0-1731153120830,5,FailOnTimeoutGroup] 2024-11-09T11:52:03,774 INFO [M:0;3264b4bbda9f:43655 {}] hbase.ChoreService(370): Chore service for: master/3264b4bbda9f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T11:52:03,774 INFO [M:0;3264b4bbda9f:43655 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T11:52:03,774 DEBUG [M:0;3264b4bbda9f:43655 {}] master.HMaster(1795): Stopping service threads 2024-11-09T11:52:03,774 INFO [M:0;3264b4bbda9f:43655 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T11:52:03,774 INFO [M:0;3264b4bbda9f:43655 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T11:52:03,775 INFO [M:0;3264b4bbda9f:43655 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T11:52:03,775 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-09T11:52:03,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T11:52:03,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:03,783 DEBUG [M:0;3264b4bbda9f:43655 {}] zookeeper.ZKUtil(347): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T11:52:03,784 WARN [M:0;3264b4bbda9f:43655 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T11:52:03,785 INFO [M:0;3264b4bbda9f:43655 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/.lastflushedseqids 2024-11-09T11:52:03,801 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,801 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T11:52:03,804 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:38168 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:36503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38168 dst: /127.0.0.1:36503 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:03,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-09T11:52:03,808 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:52:03,808 INFO [M:0;3264b4bbda9f:43655 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T11:52:03,808 INFO [M:0;3264b4bbda9f:43655 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T11:52:03,809 DEBUG [M:0;3264b4bbda9f:43655 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T11:52:03,809 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:03,809 DEBUG [M:0;3264b4bbda9f:43655 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:03,809 DEBUG [M:0;3264b4bbda9f:43655 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T11:52:03,809 DEBUG [M:0;3264b4bbda9f:43655 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T11:52:03,809 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-11-09T11:52:03,830 DEBUG [M:0;3264b4bbda9f:43655 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/79abbb84ca5044e4baeb77e9c9dec1fa is 82, key is hbase:meta,,1/info:regioninfo/1731153121732/Put/seqid=0 2024-11-09T11:52:03,832 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,832 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,835 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:34084 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:41587:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34084 dst: /127.0.0.1:41587 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:03,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-09T11:52:03,841 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T11:52:03,841 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/79abbb84ca5044e4baeb77e9c9dec1fa 2024-11-09T11:52:03,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:03,862 INFO [RS:0;3264b4bbda9f:34739 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T11:52:03,862 INFO [RS:0;3264b4bbda9f:34739 {}] regionserver.HRegionServer(1031): Exiting; stopping=3264b4bbda9f,34739,1731153119511; zookeeper connection closed. 2024-11-09T11:52:03,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x1011f703ec90001, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:03,862 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@47866ac {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@47866ac 2024-11-09T11:52:03,863 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T11:52:03,865 DEBUG [M:0;3264b4bbda9f:43655 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a11ece0ae7024acc891108725371985b is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731153122571/Put/seqid=0 2024-11-09T11:52:03,867 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,867 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,873 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:53792 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:35339:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53792 dst: /127.0.0.1:35339 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:03,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_-9223372036854775552_1037 (size=6438) 2024-11-09T11:52:03,877 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:52:03,877 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a11ece0ae7024acc891108725371985b 2024-11-09T11:52:03,901 DEBUG [M:0;3264b4bbda9f:43655 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/462a34a19e82473e97896b3e50c921a8 is 69, key is 3264b4bbda9f,34739,1731153119511/rs:state/1731153120860/Put/seqid=0 2024-11-09T11:52:03,903 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,903 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T11:52:03,906 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1352494965_22 at /127.0.0.1:53802 [Receiving block BP-809292392-172.17.0.2-1731153113877:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:35339:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53802 dst: /127.0.0.1:35339 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T11:52:03,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-09T11:52:03,910 WARN [M:0;3264b4bbda9f:43655 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T11:52:03,910 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/462a34a19e82473e97896b3e50c921a8 2024-11-09T11:52:03,918 DEBUG [M:0;3264b4bbda9f:43655 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/79abbb84ca5044e4baeb77e9c9dec1fa as hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/79abbb84ca5044e4baeb77e9c9dec1fa 2024-11-09T11:52:03,925 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/79abbb84ca5044e4baeb77e9c9dec1fa, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T11:52:03,927 DEBUG [M:0;3264b4bbda9f:43655 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a11ece0ae7024acc891108725371985b as hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a11ece0ae7024acc891108725371985b 2024-11-09T11:52:03,935 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a11ece0ae7024acc891108725371985b, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T11:52:03,937 DEBUG [M:0;3264b4bbda9f:43655 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/462a34a19e82473e97896b3e50c921a8 as hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/462a34a19e82473e97896b3e50c921a8 2024-11-09T11:52:03,945 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/462a34a19e82473e97896b3e50c921a8, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T11:52:03,947 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=72, compaction requested=false 2024-11-09T11:52:03,948 INFO [M:0;3264b4bbda9f:43655 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:03,948 DEBUG [M:0;3264b4bbda9f:43655 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731153123809Disabling compacts and flushes for region at 1731153123809Disabling writes for close at 1731153123809Obtaining lock to block concurrent updates at 1731153123809Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731153123809Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1731153123809Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731153123810 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731153123810Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731153123829 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731153123830 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731153123849 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731153123864 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731153123864Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731153123885 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731153123901 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731153123901Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@647e80e2: reopening flushed file at 1731153123917 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75d7b2a: reopening flushed file at 1731153123926 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f8a2a35: reopening flushed file at 1731153123935 (+9 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=72, compaction requested=false at 1731153123947 (+12 ms)Writing region close event to WAL at 1731153123948 (+1 ms)Closed at 1731153123948 2024-11-09T11:52:03,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35339 is added to blk_1073741825_1011 (size=32662) 2024-11-09T11:52:03,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36503 is added to blk_1073741825_1011 (size=32662) 2024-11-09T11:52:03,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41587 is added to blk_1073741825_1011 (size=32662) 2024-11-09T11:52:03,954 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T11:52:03,954 INFO [M:0;3264b4bbda9f:43655 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-09T11:52:03,954 INFO [M:0;3264b4bbda9f:43655 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43655 2024-11-09T11:52:03,954 INFO [M:0;3264b4bbda9f:43655 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T11:52:04,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:04,062 INFO [M:0;3264b4bbda9f:43655 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T11:52:04,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43655-0x1011f703ec90000, quorum=127.0.0.1:60726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:04,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@653e6301{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:52:04,110 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@404caff2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T11:52:04,110 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T11:52:04,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T11:52:04,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir/,STOPPED} 2024-11-09T11:52:04,115 WARN [BP-809292392-172.17.0.2-1731153113877 heartbeating to localhost/127.0.0.1:40827 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T11:52:04,115 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T11:52:04,115 WARN [BP-809292392-172.17.0.2-1731153113877 heartbeating to localhost/127.0.0.1:40827 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-809292392-172.17.0.2-1731153113877 (Datanode Uuid 00628b43-1e30-4aab-9f99-dfef2f36b11f) service to localhost/127.0.0.1:40827 2024-11-09T11:52:04,115 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T11:52:04,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data5/current/BP-809292392-172.17.0.2-1731153113877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:04,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data6/current/BP-809292392-172.17.0.2-1731153113877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:04,117 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T11:52:04,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@513cab2c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:52:04,120 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T11:52:04,120 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T11:52:04,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T11:52:04,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir/,STOPPED} 2024-11-09T11:52:04,121 WARN [BP-809292392-172.17.0.2-1731153113877 heartbeating to localhost/127.0.0.1:40827 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T11:52:04,121 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T11:52:04,122 WARN [BP-809292392-172.17.0.2-1731153113877 heartbeating to localhost/127.0.0.1:40827 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-809292392-172.17.0.2-1731153113877 (Datanode Uuid c7428fe4-f620-4632-a5aa-936818a86cf3) service to localhost/127.0.0.1:40827 2024-11-09T11:52:04,122 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T11:52:04,122 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data3/current/BP-809292392-172.17.0.2-1731153113877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:04,122 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data4/current/BP-809292392-172.17.0.2-1731153113877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:04,123 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T11:52:04,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65462677{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:52:04,129 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T11:52:04,129 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T11:52:04,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T11:52:04,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir/,STOPPED} 2024-11-09T11:52:04,131 WARN [BP-809292392-172.17.0.2-1731153113877 heartbeating to localhost/127.0.0.1:40827 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T11:52:04,131 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T11:52:04,131 WARN [BP-809292392-172.17.0.2-1731153113877 heartbeating to localhost/127.0.0.1:40827 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-809292392-172.17.0.2-1731153113877 (Datanode Uuid 1247d701-817a-4bce-82ae-efb2adb0e6d6) service to localhost/127.0.0.1:40827 2024-11-09T11:52:04,131 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T11:52:04,131 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data1/current/BP-809292392-172.17.0.2-1731153113877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:04,131 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/cluster_16b883f8-ca3b-03dd-b794-eab898657145/data/data2/current/BP-809292392-172.17.0.2-1731153113877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:04,132 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T11:52:04,140 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58dbf239{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T11:52:04,140 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T11:52:04,141 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T11:52:04,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T11:52:04,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir/,STOPPED} 2024-11-09T11:52:04,150 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-09T11:52:04,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-09T11:52:04,187 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=90 (was 161), OpenFileDescriptor=437 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=134 (was 84) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5862 (was 6148) 2024-11-09T11:52:04,194 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=90, OpenFileDescriptor=437, MaxFileDescriptor=1048576, SystemLoadAverage=134, ProcessCount=11, AvailableMemoryMB=5862 2024-11-09T11:52:04,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T11:52:04,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.log.dir so I do NOT create it in target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f 2024-11-09T11:52:04,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f50c6a47-b999-4f29-9ffc-4469cb1ef31e/hadoop.tmp.dir so I do NOT create it in target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f 2024-11-09T11:52:04,194 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9, deleteOnExit=true 2024-11-09T11:52:04,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-09T11:52:04,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/test.cache.data in system properties and HBase conf 2024-11-09T11:52:04,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T11:52:04,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.log.dir in system properties and HBase conf 2024-11-09T11:52:04,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T11:52:04,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T11:52:04,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T11:52:04,195 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T11:52:04,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T11:52:04,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T11:52:04,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T11:52:04,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T11:52:04,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T11:52:04,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T11:52:04,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T11:52:04,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T11:52:04,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T11:52:04,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/nfs.dump.dir in system properties and HBase conf 2024-11-09T11:52:04,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/java.io.tmpdir in system properties and HBase conf 2024-11-09T11:52:04,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T11:52:04,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T11:52:04,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T11:52:04,530 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:52:04,535 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T11:52:04,540 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T11:52:04,540 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T11:52:04,540 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T11:52:04,541 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:52:04,542 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3950f25b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.log.dir/,AVAILABLE} 2024-11-09T11:52:04,542 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@193f4fa8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T11:52:04,647 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5614c6c6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/java.io.tmpdir/jetty-localhost-43591-hadoop-hdfs-3_4_1-tests_jar-_-any-435851619004278735/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T11:52:04,648 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@236a8141{HTTP/1.1, (http/1.1)}{localhost:43591} 2024-11-09T11:52:04,648 INFO [Time-limited test {}] server.Server(415): Started @12828ms 2024-11-09T11:52:05,077 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:52:05,081 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T11:52:05,082 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T11:52:05,082 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T11:52:05,082 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T11:52:05,082 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ae83c3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.log.dir/,AVAILABLE} 2024-11-09T11:52:05,083 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47fad3b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T11:52:05,178 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f76b201{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/java.io.tmpdir/jetty-localhost-39215-hadoop-hdfs-3_4_1-tests_jar-_-any-18296024951316122188/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:52:05,178 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@28829bc6{HTTP/1.1, (http/1.1)}{localhost:39215} 2024-11-09T11:52:05,178 INFO [Time-limited test {}] server.Server(415): Started @13358ms 2024-11-09T11:52:05,180 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T11:52:05,216 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:52:05,220 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T11:52:05,221 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T11:52:05,221 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T11:52:05,221 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T11:52:05,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@143655f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.log.dir/,AVAILABLE} 2024-11-09T11:52:05,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e802acb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T11:52:05,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3373c13a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/java.io.tmpdir/jetty-localhost-37105-hadoop-hdfs-3_4_1-tests_jar-_-any-15619902098833163235/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:52:05,316 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@619667{HTTP/1.1, (http/1.1)}{localhost:37105} 2024-11-09T11:52:05,316 INFO [Time-limited test {}] server.Server(415): Started @13496ms 2024-11-09T11:52:05,317 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T11:52:05,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T11:52:05,353 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T11:52:05,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T11:52:05,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T11:52:05,354 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T11:52:05,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@659fda5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.log.dir/,AVAILABLE} 2024-11-09T11:52:05,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17444eb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T11:52:05,449 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4038045d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/java.io.tmpdir/jetty-localhost-41017-hadoop-hdfs-3_4_1-tests_jar-_-any-2626273222161596238/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:52:05,449 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b9e74e2{HTTP/1.1, (http/1.1)}{localhost:41017} 2024-11-09T11:52:05,449 INFO [Time-limited test {}] server.Server(415): Started @13630ms 2024-11-09T11:52:05,451 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T11:52:06,556 WARN [Thread-566 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data2/current/BP-1269958615-172.17.0.2-1731153124222/current, will proceed with Du for space computation calculation, 2024-11-09T11:52:06,556 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data1/current/BP-1269958615-172.17.0.2-1731153124222/current, will proceed with Du for space computation calculation, 2024-11-09T11:52:06,575 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T11:52:06,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe28b9be012071b7 with lease ID 0xcb6e767853453e28: Processing first storage report for DS-63a3c673-6303-4fe6-8341-59e94278bdc8 from datanode DatanodeRegistration(127.0.0.1:34609, datanodeUuid=c3f16103-019c-437f-ba22-985aa1d189b7, infoPort=45901, infoSecurePort=0, ipcPort=34237, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222) 2024-11-09T11:52:06,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe28b9be012071b7 with lease ID 0xcb6e767853453e28: from storage DS-63a3c673-6303-4fe6-8341-59e94278bdc8 node DatanodeRegistration(127.0.0.1:34609, datanodeUuid=c3f16103-019c-437f-ba22-985aa1d189b7, infoPort=45901, infoSecurePort=0, ipcPort=34237, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T11:52:06,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe28b9be012071b7 with lease ID 0xcb6e767853453e28: Processing first storage report for DS-c04561fd-6d14-45c6-8ac2-06a675332937 from datanode DatanodeRegistration(127.0.0.1:34609, datanodeUuid=c3f16103-019c-437f-ba22-985aa1d189b7, infoPort=45901, infoSecurePort=0, ipcPort=34237, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222) 2024-11-09T11:52:06,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe28b9be012071b7 with lease ID 0xcb6e767853453e28: from storage DS-c04561fd-6d14-45c6-8ac2-06a675332937 node DatanodeRegistration(127.0.0.1:34609, datanodeUuid=c3f16103-019c-437f-ba22-985aa1d189b7, infoPort=45901, infoSecurePort=0, ipcPort=34237, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T11:52:06,721 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data4/current/BP-1269958615-172.17.0.2-1731153124222/current, will proceed with Du for space computation calculation, 2024-11-09T11:52:06,721 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data3/current/BP-1269958615-172.17.0.2-1731153124222/current, will proceed with Du for space computation calculation, 2024-11-09T11:52:06,735 WARN [Thread-528 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T11:52:06,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4012538d72d29aa9 with lease ID 0xcb6e767853453e29: Processing first storage report for DS-3c446442-042f-49e1-a769-e8b84970f50f from datanode DatanodeRegistration(127.0.0.1:46067, datanodeUuid=9e558314-2543-4f0c-bc67-28a1e4577c41, infoPort=39659, infoSecurePort=0, ipcPort=43855, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222) 2024-11-09T11:52:06,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4012538d72d29aa9 with lease ID 0xcb6e767853453e29: from storage DS-3c446442-042f-49e1-a769-e8b84970f50f node DatanodeRegistration(127.0.0.1:46067, datanodeUuid=9e558314-2543-4f0c-bc67-28a1e4577c41, infoPort=39659, infoSecurePort=0, ipcPort=43855, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T11:52:06,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4012538d72d29aa9 with lease ID 0xcb6e767853453e29: Processing first storage report for DS-ed48445c-3d06-420d-9612-e80e64f1904e from datanode DatanodeRegistration(127.0.0.1:46067, datanodeUuid=9e558314-2543-4f0c-bc67-28a1e4577c41, infoPort=39659, infoSecurePort=0, ipcPort=43855, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222) 2024-11-09T11:52:06,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4012538d72d29aa9 with lease ID 0xcb6e767853453e29: from storage DS-ed48445c-3d06-420d-9612-e80e64f1904e node DatanodeRegistration(127.0.0.1:46067, datanodeUuid=9e558314-2543-4f0c-bc67-28a1e4577c41, infoPort=39659, infoSecurePort=0, ipcPort=43855, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T11:52:06,805 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data5/current/BP-1269958615-172.17.0.2-1731153124222/current, will proceed with Du for space computation calculation, 2024-11-09T11:52:06,805 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data6/current/BP-1269958615-172.17.0.2-1731153124222/current, will proceed with Du for space computation calculation, 2024-11-09T11:52:06,825 WARN [Thread-550 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T11:52:06,828 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6193fcf87312a23a with lease ID 0xcb6e767853453e2a: Processing first storage report for DS-e1aae1ff-fdb8-4b21-a747-f6d0a222294b from datanode DatanodeRegistration(127.0.0.1:43611, datanodeUuid=0a67e403-de3e-402e-8a69-025f76f986aa, infoPort=34337, infoSecurePort=0, ipcPort=35167, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222) 2024-11-09T11:52:06,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6193fcf87312a23a with lease ID 0xcb6e767853453e2a: from storage DS-e1aae1ff-fdb8-4b21-a747-f6d0a222294b node DatanodeRegistration(127.0.0.1:43611, datanodeUuid=0a67e403-de3e-402e-8a69-025f76f986aa, infoPort=34337, infoSecurePort=0, ipcPort=35167, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T11:52:06,828 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6193fcf87312a23a with lease ID 0xcb6e767853453e2a: Processing first storage report for DS-8ed7abad-b073-4e09-830a-7c93a9d54e0d from datanode DatanodeRegistration(127.0.0.1:43611, datanodeUuid=0a67e403-de3e-402e-8a69-025f76f986aa, infoPort=34337, infoSecurePort=0, ipcPort=35167, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222) 2024-11-09T11:52:06,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6193fcf87312a23a with lease ID 0xcb6e767853453e2a: from storage DS-8ed7abad-b073-4e09-830a-7c93a9d54e0d node DatanodeRegistration(127.0.0.1:43611, datanodeUuid=0a67e403-de3e-402e-8a69-025f76f986aa, infoPort=34337, infoSecurePort=0, ipcPort=35167, storageInfo=lv=-57;cid=testClusterID;nsid=2029951465;c=1731153124222), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T11:52:06,896 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f 2024-11-09T11:52:06,899 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/zookeeper_0, clientPort=50195, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T11:52:06,899 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50195 2024-11-09T11:52:06,900 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-09T11:52:06,902 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:06,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741825_1001 (size=7) 2024-11-09T11:52:06,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741825_1001 (size=7) 2024-11-09T11:52:06,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741825_1001 (size=7) 2024-11-09T11:52:06,915 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be with version=8 2024-11-09T11:52:06,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40827/user/jenkins/test-data/613ee75c-25bb-36d1-87a3-2a1bed647934/hbase-staging 2024-11-09T11:52:06,917 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3264b4bbda9f:0 server-side Connection retries=45 2024-11-09T11:52:06,917 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:06,918 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:06,918 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T11:52:06,918 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:06,918 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T11:52:06,918 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T11:52:06,918 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T11:52:06,919 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34511 2024-11-09T11:52:06,920 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34511 connecting to ZooKeeper ensemble=127.0.0.1:50195 2024-11-09T11:52:06,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:345110x0, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T11:52:06,975 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34511-0x1011f70619d0000 connected 2024-11-09T11:52:07,056 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:07,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:07,060 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:07,060 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be, hbase.cluster.distributed=false 2024-11-09T11:52:07,062 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T11:52:07,062 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34511 2024-11-09T11:52:07,063 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34511 2024-11-09T11:52:07,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34511 2024-11-09T11:52:07,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34511 2024-11-09T11:52:07,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34511 2024-11-09T11:52:07,079 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3264b4bbda9f:0 server-side Connection retries=45 2024-11-09T11:52:07,079 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:07,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:07,080 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T11:52:07,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:07,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T11:52:07,080 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T11:52:07,080 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T11:52:07,081 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37851 
2024-11-09T11:52:07,082 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37851 connecting to ZooKeeper ensemble=127.0.0.1:50195 2024-11-09T11:52:07,083 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:07,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:07,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:378510x0, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T11:52:07,099 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:378510x0, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:07,099 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37851-0x1011f70619d0001 connected 2024-11-09T11:52:07,099 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T11:52:07,100 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T11:52:07,100 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T11:52:07,101 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T11:52:07,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37851 2024-11-09T11:52:07,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37851 2024-11-09T11:52:07,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37851 2024-11-09T11:52:07,103 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37851 2024-11-09T11:52:07,103 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37851 2024-11-09T11:52:07,117 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3264b4bbda9f:0 server-side Connection retries=45 2024-11-09T11:52:07,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:07,118 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:07,118 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T11:52:07,118 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:07,118 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T11:52:07,118 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T11:52:07,118 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T11:52:07,119 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43811 2024-11-09T11:52:07,120 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43811 connecting to ZooKeeper ensemble=127.0.0.1:50195 2024-11-09T11:52:07,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:07,123 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:07,137 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:438110x0, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T11:52:07,138 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:438110x0, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:07,138 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43811-0x1011f70619d0002 connected 2024-11-09T11:52:07,138 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T11:52:07,139 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T11:52:07,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T11:52:07,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T11:52:07,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43811 2024-11-09T11:52:07,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43811 2024-11-09T11:52:07,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43811 2024-11-09T11:52:07,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43811 2024-11-09T11:52:07,142 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43811 2024-11-09T11:52:07,156 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3264b4bbda9f:0 server-side Connection retries=45 2024-11-09T11:52:07,156 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:07,156 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:07,156 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T11:52:07,156 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T11:52:07,156 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T11:52:07,156 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T11:52:07,157 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T11:52:07,157 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44011 2024-11-09T11:52:07,159 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44011 connecting to ZooKeeper ensemble=127.0.0.1:50195 2024-11-09T11:52:07,159 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:07,161 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:07,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440110x0, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T11:52:07,172 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:440110x0, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:07,172 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44011-0x1011f70619d0003 connected 2024-11-09T11:52:07,173 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T11:52:07,173 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T11:52:07,174 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
2024-11-09T11:52:07,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T11:52:07,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44011 2024-11-09T11:52:07,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44011 2024-11-09T11:52:07,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44011 2024-11-09T11:52:07,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44011 2024-11-09T11:52:07,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44011 2024-11-09T11:52:07,186 DEBUG [M:0;3264b4bbda9f:34511 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3264b4bbda9f:34511 2024-11-09T11:52:07,187 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:07,188 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-09T11:52:07,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:52:07,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:52:07,193 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:52:07,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:52:07,194 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:07,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T11:52:07,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T11:52:07,203 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T11:52:07,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,203 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,204 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T11:52:07,205 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3264b4bbda9f,34511,1731153126917 from backup master directory 2024-11-09T11:52:07,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:07,214 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:52:07,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:52:07,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:52:07,214 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T11:52:07,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T11:52:07,214 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:07,220 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/hbase.id] with ID: 99f39ece-0630-4a77-932c-5101d68181ca 2024-11-09T11:52:07,220 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/.tmp/hbase.id 2024-11-09T11:52:07,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741826_1002 (size=42) 2024-11-09T11:52:07,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741826_1002 (size=42) 2024-11-09T11:52:07,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741826_1002 (size=42) 2024-11-09T11:52:07,236 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/.tmp/hbase.id]:[hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/hbase.id] 2024-11-09T11:52:07,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T11:52:07,251 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T11:52:07,255 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T11:52:07,255 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T11:52:07,257 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-09T11:52:07,267 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741827_1003 (size=196) 2024-11-09T11:52:07,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741827_1003 (size=196) 2024-11-09T11:52:07,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741827_1003 (size=196) 2024-11-09T11:52:07,277 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T11:52:07,278 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T11:52:07,278 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T11:52:07,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is 
added to blk_1073741828_1004 (size=1189) 2024-11-09T11:52:07,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741828_1004 (size=1189) 2024-11-09T11:52:07,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741828_1004 (size=1189) 2024-11-09T11:52:07,292 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store 2024-11-09T11:52:07,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741829_1005 (size=34) 2024-11-09T11:52:07,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741829_1005 (size=34) 2024-11-09T11:52:07,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741829_1005 (size=34) 2024-11-09T11:52:07,302 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:07,302 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T11:52:07,302 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:07,303 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T11:52:07,303 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T11:52:07,303 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:07,303 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:07,303 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731153127302Disabling compacts and flushes for region at 1731153127302Disabling writes for close at 1731153127303 (+1 ms)Writing region close event to WAL at 1731153127303Closed at 1731153127303 2024-11-09T11:52:07,304 WARN [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/.initializing 2024-11-09T11:52:07,304 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/WALs/3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:07,307 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C34511%2C1731153126917, suffix=, logDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/WALs/3264b4bbda9f,34511,1731153126917, archiveDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/oldWALs, maxLogs=10 2024-11-09T11:52:07,308 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3264b4bbda9f%2C34511%2C1731153126917.1731153127307 2024-11-09T11:52:07,317 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/WALs/3264b4bbda9f,34511,1731153126917/3264b4bbda9f%2C34511%2C1731153126917.1731153127307 2024-11-09T11:52:07,318 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39659:39659),(127.0.0.1/127.0.0.1:45901:45901),(127.0.0.1/127.0.0.1:34337:34337)] 2024-11-09T11:52:07,319 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T11:52:07,319 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:07,319 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,319 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,321 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,323 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T11:52:07,323 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:07,323 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:07,324 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,325 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T11:52:07,325 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:07,326 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T11:52:07,326 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T11:52:07,328 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:07,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T11:52:07,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T11:52:07,331 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:07,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T11:52:07,332 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,333 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,334 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,335 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,335 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,336 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T11:52:07,337 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T11:52:07,341 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T11:52:07,341 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72786343, jitterRate=0.084601029753685}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T11:52:07,343 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731153127319Initializing all the Stores at 1731153127321 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153127321Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153127321Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153127321Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153127321Cleaning up temporary data from old regions at 1731153127335 (+14 ms)Region opened successfully at 1731153127343 (+8 ms) 2024-11-09T11:52:07,343 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T11:52:07,348 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6de78885, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3264b4bbda9f/172.17.0.2:0 2024-11-09T11:52:07,349 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T11:52:07,349 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T11:52:07,349 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T11:52:07,349 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T11:52:07,350 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-09T11:52:07,350 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-09T11:52:07,350 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T11:52:07,353 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-09T11:52:07,354 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T11:52:07,361 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T11:52:07,362 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T11:52:07,362 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T11:52:07,372 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T11:52:07,372 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T11:52:07,374 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T11:52:07,382 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T11:52:07,383 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T11:52:07,393 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T11:52:07,395 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T11:52:07,403 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T11:52:07,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:07,414 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:07,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:07,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-09T11:52:07,414 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,414 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3264b4bbda9f,34511,1731153126917, sessionid=0x1011f70619d0000, setting cluster-up flag (Was=false) 2024-11-09T11:52:07,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,515 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,666 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T11:52:07,668 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:07,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,687 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:07,719 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T11:52:07,721 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:07,722 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T11:52:07,724 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T11:52:07,725 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T11:52:07,725 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-09T11:52:07,725 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3264b4bbda9f,34511,1731153126917 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T11:52:07,727 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3264b4bbda9f:0, corePoolSize=5, maxPoolSize=5 2024-11-09T11:52:07,727 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3264b4bbda9f:0, corePoolSize=5, maxPoolSize=5 2024-11-09T11:52:07,727 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3264b4bbda9f:0, corePoolSize=5, maxPoolSize=5 2024-11-09T11:52:07,727 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3264b4bbda9f:0, corePoolSize=5, maxPoolSize=5 2024-11-09T11:52:07,727 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3264b4bbda9f:0, corePoolSize=10, maxPoolSize=10 2024-11-09T11:52:07,727 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,727 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3264b4bbda9f:0, corePoolSize=2, maxPoolSize=2 2024-11-09T11:52:07,727 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,728 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731153157728 2024-11-09T11:52:07,728 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T11:52:07,728 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T11:52:07,728 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T11:52:07,728 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T11:52:07,728 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T11:52:07,728 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T11:52:07,729 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,729 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T11:52:07,729 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T11:52:07,729 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T11:52:07,729 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T11:52:07,730 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T11:52:07,730 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T11:52:07,730 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T11:52:07,731 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:07,731 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.large.0-1731153127730,5,FailOnTimeoutGroup] 2024-11-09T11:52:07,731 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.small.0-1731153127731,5,FailOnTimeoutGroup] 2024-11-09T11:52:07,731 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,732 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T11:52:07,732 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-11-09T11:52:07,731 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T11:52:07,732 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-09T11:52:07,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741831_1007 (size=1321) 2024-11-09T11:52:07,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741831_1007 (size=1321) 2024-11-09T11:52:07,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741831_1007 (size=1321) 2024-11-09T11:52:07,742 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T11:52:07,742 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be 2024-11-09T11:52:07,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741832_1008 (size=32) 2024-11-09T11:52:07,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741832_1008 (size=32) 2024-11-09T11:52:07,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741832_1008 (size=32) 2024-11-09T11:52:07,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:07,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T11:52:07,756 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T11:52:07,756 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:07,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:07,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T11:52:07,758 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T11:52:07,758 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:07,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:07,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T11:52:07,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T11:52:07,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:07,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:07,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T11:52:07,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T11:52:07,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:07,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:07,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T11:52:07,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740 2024-11-09T11:52:07,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740 2024-11-09T11:52:07,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T11:52:07,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T11:52:07,766 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-09T11:52:07,767 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T11:52:07,769 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T11:52:07,770 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65434748, jitterRate=-0.024946272373199463}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T11:52:07,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731153127753Initializing all the Stores at 1731153127754 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153127754Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153127754Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153127754Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153127754Cleaning up temporary data from old regions at 1731153127765 (+11 ms)Region opened successfully at 1731153127771 (+6 ms) 2024-11-09T11:52:07,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T11:52:07,771 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T11:52:07,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T11:52:07,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T11:52:07,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T11:52:07,771 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T11:52:07,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731153127771Disabling compacts and flushes for region at 1731153127771Disabling writes for close at 1731153127771Writing 
region close event to WAL at 1731153127771Closed at 1731153127771 2024-11-09T11:52:07,773 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T11:52:07,773 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T11:52:07,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T11:52:07,775 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T11:52:07,776 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T11:52:07,779 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(746): ClusterId : 99f39ece-0630-4a77-932c-5101d68181ca 2024-11-09T11:52:07,779 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(746): ClusterId : 99f39ece-0630-4a77-932c-5101d68181ca 2024-11-09T11:52:07,779 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(746): ClusterId : 99f39ece-0630-4a77-932c-5101d68181ca 2024-11-09T11:52:07,779 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T11:52:07,779 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T11:52:07,779 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T11:52:07,791 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T11:52:07,791 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T11:52:07,791 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T11:52:07,791 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T11:52:07,791 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T11:52:07,791 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T11:52:07,814 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T11:52:07,814 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T11:52:07,815 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T11:52:07,815 DEBUG [RS:1;3264b4bbda9f:43811 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39bd3223, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3264b4bbda9f/172.17.0.2:0 2024-11-09T11:52:07,815 DEBUG [RS:0;3264b4bbda9f:37851 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ea71f78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3264b4bbda9f/172.17.0.2:0 2024-11-09T11:52:07,815 DEBUG [RS:2;3264b4bbda9f:44011 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b5d40e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3264b4bbda9f/172.17.0.2:0 2024-11-09T11:52:07,829 DEBUG [RS:2;3264b4bbda9f:44011 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;3264b4bbda9f:44011 2024-11-09T11:52:07,829 DEBUG [RS:1;3264b4bbda9f:43811 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3264b4bbda9f:43811 2024-11-09T11:52:07,829 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T11:52:07,829 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T11:52:07,829 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T11:52:07,829 DEBUG [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T11:52:07,829 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T11:52:07,829 DEBUG [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-09T11:52:07,830 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(2659): reportForDuty to master=3264b4bbda9f,34511,1731153126917 with port=43811, startcode=1731153127117 2024-11-09T11:52:07,830 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(2659): reportForDuty to master=3264b4bbda9f,34511,1731153126917 with port=44011, startcode=1731153127155 2024-11-09T11:52:07,830 DEBUG [RS:1;3264b4bbda9f:43811 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T11:52:07,830 DEBUG [RS:2;3264b4bbda9f:44011 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T11:52:07,832 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60599, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T11:52:07,832 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58341, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T11:52:07,833 DEBUG [RS:0;3264b4bbda9f:37851 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3264b4bbda9f:37851 2024-11-09T11:52:07,833 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T11:52:07,833 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T11:52:07,833 DEBUG [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T11:52:07,833 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:07,833 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34511 {}] master.ServerManager(517): Registering regionserver=3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:07,834 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(2659): reportForDuty to master=3264b4bbda9f,34511,1731153126917 with port=37851, startcode=1731153127079 2024-11-09T11:52:07,834 DEBUG [RS:0;3264b4bbda9f:37851 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T11:52:07,835 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:07,836 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34511 {}] master.ServerManager(517): Registering regionserver=3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:07,836 DEBUG [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be 2024-11-09T11:52:07,836 DEBUG [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44375 2024-11-09T11:52:07,836 DEBUG [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T11:52:07,836 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47171, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 
(auth:SIMPLE), service=RegionServerStatusService 2024-11-09T11:52:07,838 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3264b4bbda9f,37851,1731153127079 2024-11-09T11:52:07,838 DEBUG [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be 2024-11-09T11:52:07,838 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34511 {}] master.ServerManager(517): Registering regionserver=3264b4bbda9f,37851,1731153127079 2024-11-09T11:52:07,838 DEBUG [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44375 2024-11-09T11:52:07,838 DEBUG [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T11:52:07,840 DEBUG [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be 2024-11-09T11:52:07,840 DEBUG [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44375 2024-11-09T11:52:07,840 DEBUG [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T11:52:07,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T11:52:07,881 DEBUG [RS:1;3264b4bbda9f:43811 {}] zookeeper.ZKUtil(111): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:07,881 WARN [RS:1;3264b4bbda9f:43811 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T11:52:07,881 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3264b4bbda9f,44011,1731153127155] 2024-11-09T11:52:07,881 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3264b4bbda9f,43811,1731153127117] 2024-11-09T11:52:07,881 INFO [RS:1;3264b4bbda9f:43811 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T11:52:07,881 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3264b4bbda9f,37851,1731153127079] 2024-11-09T11:52:07,881 DEBUG [RS:2;3264b4bbda9f:44011 {}] zookeeper.ZKUtil(111): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:07,881 DEBUG [RS:0;3264b4bbda9f:37851 {}] zookeeper.ZKUtil(111): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3264b4bbda9f,37851,1731153127079 2024-11-09T11:52:07,881 WARN [RS:2;3264b4bbda9f:44011 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
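[Editor's note] The ZKUtil and RegionServerTracker entries show each region server publishing an ephemeral znode under /hbase/rs and the master reacting to the resulting NodeChildrenChanged event. A minimal sketch of that registration/watch pattern with the plain ZooKeeper client follows; the quorum string and znode paths are placeholders, not this test's.

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistration {
        public static void main(String[] args) throws Exception {
            // Placeholder quorum; the test above uses 127.0.0.1:50195.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event ->
                    System.out.println("watch fired: " + event.getType() + " " + event.getPath()));
            // Persistent parent nodes, created once.
            zk.create("/demo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            zk.create("/demo/rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            // A server announces itself with an ephemeral child; it vanishes when the session dies.
            zk.create("/demo/rs/server-1", new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            // A tracker lists the children and re-arms the watch, much like RegionServerTracker.
            List<String> live = zk.getChildren("/demo/rs", true);
            System.out.println("live servers: " + live);
            zk.close();
        }
    }
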
2024-11-09T11:52:07,881 WARN [RS:0;3264b4bbda9f:37851 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T11:52:07,881 DEBUG [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:07,881 INFO [RS:2;3264b4bbda9f:44011 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T11:52:07,881 INFO [RS:0;3264b4bbda9f:37851 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T11:52:07,881 DEBUG [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:07,881 DEBUG [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,37851,1731153127079 2024-11-09T11:52:07,885 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T11:52:07,885 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T11:52:07,885 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T11:52:07,888 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T11:52:07,889 INFO [RS:0;3264b4bbda9f:37851 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T11:52:07,889 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,890 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T11:52:07,890 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T11:52:07,891 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T11:52:07,891 INFO [RS:2;3264b4bbda9f:44011 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T11:52:07,891 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,891 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
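[Editor's note] The MemStoreFlusher line reports a global memstore limit of 880 M with a low-water mark of 836 M; 836/880 = 0.95, consistent with a 95% low-water factor applied to a limit derived from heap size. A tiny arithmetic sketch of that relationship, with the heap size and fractions as assumptions for illustration only:

    public class MemStoreLimits {
        public static void main(String[] args) {
            long heapBytes = 2_200L * 1024 * 1024;  // assumed heap, chosen so the numbers match the log
            double globalFraction = 0.4;            // assumed memstore share of heap
            double lowWaterFactor = 0.95;           // low mark as a share of the limit
            long limit = (long) (heapBytes * globalFraction);
            long lowMark = (long) (limit * lowWaterFactor);
            System.out.printf("limit=%d MB, lowMark=%d MB%n", limit >> 20, lowMark >> 20);
        }
    }
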
2024-11-09T11:52:07,891 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,891 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,891 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,891 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3264b4bbda9f:0, corePoolSize=2, maxPoolSize=2 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,892 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:07,892 DEBUG [RS:0;3264b4bbda9f:37851 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:07,893 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T11:52:07,893 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T11:52:07,893 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,893 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,893 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3264b4bbda9f:0, corePoolSize=2, maxPoolSize=2 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:07,894 DEBUG [RS:2;3264b4bbda9f:44011 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:07,895 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T11:52:07,899 INFO [RS:1;3264b4bbda9f:43811 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T11:52:07,899 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
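[Editor's note] The ExecutorService entries list the per-purpose worker pools each region server starts, most with corePoolSize=maxPoolSize=1 and a few (snapshot and flush operations) with 3. A generic sketch of such fixed-size, named pools using plain java.util.concurrent; the pool names are copied from the log for readability only.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class NamedFixedPool {
        static ThreadPoolExecutor newPool(String name, int size) {
            AtomicInteger seq = new AtomicInteger();
            // core == max gives a fixed-size pool, mirroring corePoolSize/maxPoolSize above.
            return new ThreadPoolExecutor(size, size, 60, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<>(),
                    r -> new Thread(r, name + "-" + seq.incrementAndGet()));
        }

        public static void main(String[] args) throws Exception {
            ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1);
            ThreadPoolExecutor flushOps = newPool("RS_FLUSH_OPERATIONS", 3);
            openRegion.submit(() -> System.out.println("open-region task on " + Thread.currentThread().getName()));
            flushOps.submit(() -> System.out.println("flush task on " + Thread.currentThread().getName()));
            openRegion.shutdown();
            flushOps.shutdown();
            openRegion.awaitTermination(5, TimeUnit.SECONDS);
            flushOps.awaitTermination(5, TimeUnit.SECONDS);
        }
    }
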
2024-11-09T11:52:07,899 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T11:52:07,899 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,37851,1731153127079-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T11:52:07,899 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,900 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,44011,1731153127155-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T11:52:07,900 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T11:52:07,901 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3264b4bbda9f:0, corePoolSize=2, maxPoolSize=2 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3264b4bbda9f:0, corePoolSize=1, maxPoolSize=1 2024-11-09T11:52:07,901 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:07,902 DEBUG [RS:1;3264b4bbda9f:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0, corePoolSize=3, maxPoolSize=3 2024-11-09T11:52:07,908 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,909 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,909 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,909 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-09T11:52:07,909 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,909 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,43811,1731153127117-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T11:52:07,917 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T11:52:07,917 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,37851,1731153127079-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,917 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,917 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.Replication(171): 3264b4bbda9f,37851,1731153127079 started 2024-11-09T11:52:07,924 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T11:52:07,924 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,44011,1731153127155-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,924 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,924 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.Replication(171): 3264b4bbda9f,44011,1731153127155 started 2024-11-09T11:52:07,926 WARN [3264b4bbda9f:34511 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-09T11:52:07,929 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T11:52:07,929 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,43811,1731153127117-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,929 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,929 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.Replication(171): 3264b4bbda9f,43811,1731153127117 started 2024-11-09T11:52:07,930 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
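[Editor's note] The ChoreService entries enumerate the periodic maintenance tasks ("chores") each region server schedules, from the 1-second CompactionChecker and MemstoreFlusherChore up to the daily MobFileCleanerChore. Functionally these are fixed-rate scheduled tasks; a plain-Java sketch of the same idea, with periods copied from the log and placeholder task bodies:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);
            // CompactionChecker: period=1000 ms in the log above.
            chores.scheduleAtFixedRate(() -> System.out.println("check compactions"),
                    1000, 1000, TimeUnit.MILLISECONDS);
            // MemstoreFlusherChore: period=1000 ms.
            chores.scheduleAtFixedRate(() -> System.out.println("check memstore pressure"),
                    1000, 1000, TimeUnit.MILLISECONDS);
            // ExecutorStatusChore: period=60000 ms.
            chores.scheduleAtFixedRate(() -> System.out.println("report executor status"),
                    60_000, 60_000, TimeUnit.MILLISECONDS);
            // Tasks repeat until chores.shutdown() is called.
        }
    }
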
2024-11-09T11:52:07,930 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(1482): Serving as 3264b4bbda9f,37851,1731153127079, RpcServer on 3264b4bbda9f/172.17.0.2:37851, sessionid=0x1011f70619d0001 2024-11-09T11:52:07,931 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T11:52:07,931 DEBUG [RS:0;3264b4bbda9f:37851 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3264b4bbda9f,37851,1731153127079 2024-11-09T11:52:07,931 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,37851,1731153127079' 2024-11-09T11:52:07,931 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T11:52:07,932 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T11:52:07,932 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T11:52:07,932 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T11:52:07,932 DEBUG [RS:0;3264b4bbda9f:37851 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3264b4bbda9f,37851,1731153127079 2024-11-09T11:52:07,932 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,37851,1731153127079' 2024-11-09T11:52:07,932 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T11:52:07,933 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T11:52:07,933 DEBUG [RS:0;3264b4bbda9f:37851 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T11:52:07,933 INFO [RS:0;3264b4bbda9f:37851 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T11:52:07,933 INFO [RS:0;3264b4bbda9f:37851 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T11:52:07,943 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T11:52:07,944 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(1482): Serving as 3264b4bbda9f,44011,1731153127155, RpcServer on 3264b4bbda9f/172.17.0.2:44011, sessionid=0x1011f70619d0003 2024-11-09T11:52:07,944 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T11:52:07,944 DEBUG [RS:2;3264b4bbda9f:44011 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:07,944 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,44011,1731153127155' 2024-11-09T11:52:07,944 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T11:52:07,945 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:07,945 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T11:52:07,945 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(1482): Serving as 3264b4bbda9f,43811,1731153127117, RpcServer on 3264b4bbda9f/172.17.0.2:43811, sessionid=0x1011f70619d0002 2024-11-09T11:52:07,945 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T11:52:07,945 DEBUG [RS:1;3264b4bbda9f:43811 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:07,945 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,43811,1731153127117' 2024-11-09T11:52:07,945 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T11:52:07,945 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T11:52:07,946 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T11:52:07,946 DEBUG [RS:2;3264b4bbda9f:44011 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:07,946 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,44011,1731153127155' 2024-11-09T11:52:07,946 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T11:52:07,946 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T11:52:07,946 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T11:52:07,946 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T11:52:07,946 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T11:52:07,946 DEBUG [RS:1;3264b4bbda9f:43811 {}] 
snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:07,946 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3264b4bbda9f,43811,1731153127117' 2024-11-09T11:52:07,946 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T11:52:07,947 DEBUG [RS:2;3264b4bbda9f:44011 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T11:52:07,947 INFO [RS:2;3264b4bbda9f:44011 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T11:52:07,947 INFO [RS:2;3264b4bbda9f:44011 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T11:52:07,947 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T11:52:07,947 DEBUG [RS:1;3264b4bbda9f:43811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T11:52:07,947 INFO [RS:1;3264b4bbda9f:43811 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T11:52:07,948 INFO [RS:1;3264b4bbda9f:43811 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T11:52:08,037 INFO [RS:0;3264b4bbda9f:37851 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C37851%2C1731153127079, suffix=, logDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,37851,1731153127079, archiveDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/oldWALs, maxLogs=32 2024-11-09T11:52:08,039 INFO [RS:0;3264b4bbda9f:37851 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3264b4bbda9f%2C37851%2C1731153127079.1731153128039 2024-11-09T11:52:08,048 INFO [RS:0;3264b4bbda9f:37851 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,37851,1731153127079/3264b4bbda9f%2C37851%2C1731153127079.1731153128039 2024-11-09T11:52:08,049 INFO [RS:2;3264b4bbda9f:44011 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C44011%2C1731153127155, suffix=, logDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,44011,1731153127155, archiveDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/oldWALs, maxLogs=32 2024-11-09T11:52:08,049 INFO [RS:1;3264b4bbda9f:43811 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C43811%2C1731153127117, suffix=, logDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,43811,1731153127117, archiveDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/oldWALs, maxLogs=32 2024-11-09T11:52:08,051 INFO [RS:2;3264b4bbda9f:44011 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3264b4bbda9f%2C44011%2C1731153127155.1731153128051 2024-11-09T11:52:08,051 INFO [RS:1;3264b4bbda9f:43811 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3264b4bbda9f%2C43811%2C1731153127117.1731153128051 2024-11-09T11:52:08,055 DEBUG [RS:0;3264b4bbda9f:37851 {}] wal.AbstractFSWAL(1109): 
Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45901:45901),(127.0.0.1/127.0.0.1:34337:34337),(127.0.0.1/127.0.0.1:39659:39659)] 2024-11-09T11:52:08,061 INFO [RS:2;3264b4bbda9f:44011 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,44011,1731153127155/3264b4bbda9f%2C44011%2C1731153127155.1731153128051 2024-11-09T11:52:08,061 INFO [RS:1;3264b4bbda9f:43811 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,43811,1731153127117/3264b4bbda9f%2C43811%2C1731153127117.1731153128051 2024-11-09T11:52:08,062 DEBUG [RS:2;3264b4bbda9f:44011 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34337:34337),(127.0.0.1/127.0.0.1:39659:39659),(127.0.0.1/127.0.0.1:45901:45901)] 2024-11-09T11:52:08,062 DEBUG [RS:1;3264b4bbda9f:43811 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45901:45901),(127.0.0.1/127.0.0.1:39659:39659),(127.0.0.1/127.0.0.1:34337:34337)] 2024-11-09T11:52:08,176 DEBUG [3264b4bbda9f:34511 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T11:52:08,177 DEBUG [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(204): Hosts are {3264b4bbda9f=0} racks are {/default-rack=0} 2024-11-09T11:52:08,179 DEBUG [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T11:52:08,179 DEBUG [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T11:52:08,179 DEBUG [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T11:52:08,179 DEBUG [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T11:52:08,179 DEBUG [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T11:52:08,179 DEBUG [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T11:52:08,179 INFO [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T11:52:08,179 INFO [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T11:52:08,179 INFO [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T11:52:08,179 DEBUG [3264b4bbda9f:34511 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T11:52:08,179 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:08,181 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3264b4bbda9f,43811,1731153127117, state=OPENING 2024-11-09T11:52:08,210 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T11:52:08,221 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:08,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
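[Editor's note] Each region server above rolls its write-ahead log into hdfs://…/WALs/<host>,<port>,<startcode> (blocksize 256 MB, rollsize 128 MB, maxLogs=32) and archives old files to …/oldWALs. A hedged sketch of inspecting such a layout with the ordinary Hadoop FileSystem API; the namenode URI and root path below are placeholders, not the test-data directory used in this run.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListWals {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Placeholder namenode; the mini-cluster above runs on hdfs://localhost:44375.
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44375"), conf);
            // One subdirectory per region server under the WALs root (placeholder path).
            for (FileStatus server : fs.listStatus(new Path("/hbase/WALs"))) {
                for (FileStatus wal : fs.listStatus(server.getPath())) {
                    System.out.println(wal.getPath() + "  " + wal.getLen() + " bytes");
                }
            }
            fs.close();
        }
    }
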
2024-11-09T11:52:08,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:08,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:08,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:08,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:08,222 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T11:52:08,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:08,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3264b4bbda9f,43811,1731153127117}] 2024-11-09T11:52:08,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:08,376 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T11:52:08,380 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51551, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T11:52:08,385 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T11:52:08,386 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T11:52:08,388 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3264b4bbda9f%2C43811%2C1731153127117.meta, suffix=.meta, logDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,43811,1731153127117, archiveDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/oldWALs, maxLogs=32 2024-11-09T11:52:08,390 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3264b4bbda9f%2C43811%2C1731153127117.meta.1731153128390.meta 2024-11-09T11:52:08,397 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/WALs/3264b4bbda9f,43811,1731153127117/3264b4bbda9f%2C43811%2C1731153127117.meta.1731153128390.meta 2024-11-09T11:52:08,398 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 
{event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39659:39659),(127.0.0.1/127.0.0.1:34337:34337),(127.0.0.1/127.0.0.1:45901:45901)] 2024-11-09T11:52:08,399 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T11:52:08,399 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T11:52:08,399 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T11:52:08,400 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-09T11:52:08,400 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T11:52:08,400 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:08,400 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T11:52:08,400 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T11:52:08,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T11:52:08,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T11:52:08,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:08,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:08,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T11:52:08,405 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T11:52:08,405 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:08,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:08,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T11:52:08,407 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T11:52:08,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:08,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:08,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T11:52:08,409 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T11:52:08,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:08,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T11:52:08,410 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T11:52:08,411 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740 2024-11-09T11:52:08,412 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740 2024-11-09T11:52:08,414 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T11:52:08,414 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T11:52:08,414 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
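[Editor's note] The entries above show hbase:meta (region 1588230740, column families info/ns/rep_barrier/table) being opened on 3264b4bbda9f,43811. Once open, meta can be read like any other table; a minimal client-side sketch through the standard API, with the quorum a placeholder and the row handling purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanMeta {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Placeholder quorum; the test above uses 127.0.0.1:50195.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table meta = connection.getTable(TableName.META_TABLE_NAME);
                 ResultScanner scanner = meta.getScanner(new Scan().setLimit(10))) {
                for (Result row : scanner) {
                    // Meta row keys encode table,startkey,timestamp.encodedname.
                    System.out.println(Bytes.toStringBinary(row.getRow()));
                }
            }
        }
    }
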
2024-11-09T11:52:08,416 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T11:52:08,417 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71863548, jitterRate=0.07085031270980835}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T11:52:08,417 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T11:52:08,418 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731153128400Writing region info on filesystem at 1731153128400Initializing all the Stores at 1731153128401 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153128401Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153128402 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153128402Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731153128402Cleaning up temporary data from old regions at 1731153128414 (+12 ms)Running coprocessor post-open hooks at 1731153128417 (+3 ms)Region opened successfully at 1731153128418 (+1 ms) 2024-11-09T11:52:08,419 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731153128376 2024-11-09T11:52:08,422 DEBUG [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T11:52:08,422 INFO [RS_OPEN_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T11:52:08,423 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:08,425 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3264b4bbda9f,43811,1731153127117, state=OPEN 2024-11-09T11:52:08,453 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T11:52:08,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T11:52:08,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T11:52:08,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T11:52:08,453 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:08,453 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:08,453 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:08,453 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:08,453 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T11:52:08,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T11:52:08,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3264b4bbda9f,43811,1731153127117 in 231 msec 2024-11-09T11:52:08,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T11:52:08,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 688 msec 2024-11-09T11:52:08,467 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T11:52:08,467 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T11:52:08,470 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T11:52:08,470 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=3264b4bbda9f,43811,1731153127117, seqNum=-1] 2024-11-09T11:52:08,470 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T11:52:08,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47173, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T11:52:08,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 756 msec 2024-11-09T11:52:08,481 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731153128481, completionTime=-1 2024-11-09T11:52:08,481 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T11:52:08,482 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-09T11:52:08,484 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-09T11:52:08,484 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731153188484 2024-11-09T11:52:08,484 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731153248484 2024-11-09T11:52:08,484 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-09T11:52:08,484 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T11:52:08,485 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,34511,1731153126917-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:08,485 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,34511,1731153126917-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:08,485 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,34511,1731153126917-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:08,485 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3264b4bbda9f:34511, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:08,485 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T11:52:08,485 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-09T11:52:08,488 DEBUG [master/3264b4bbda9f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T11:52:08,490 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.276sec 2024-11-09T11:52:08,490 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T11:52:08,490 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T11:52:08,490 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T11:52:08,491 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-09T11:52:08,491 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T11:52:08,491 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,34511,1731153126917-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T11:52:08,491 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,34511,1731153126917-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T11:52:08,493 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T11:52:08,493 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T11:52:08,493 INFO [master/3264b4bbda9f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3264b4bbda9f,34511,1731153126917-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
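The block above ends with the master reporting "Master has completed initialization", which is the point at which HBaseTestingUtil considers the minicluster usable. For orientation, a minimal JUnit 4 setup that brings up a comparable three-region-server minicluster is sketched below. The class, field, and method names are hypothetical, it assumes the long-standing startMiniCluster(int) convenience overload is available on HBaseTestingUtil, and it omits any test-specific configuration (such as the erasure-coding setup the test name TestHBaseWalOnEC suggests).

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.BeforeClass;

    // Illustrative setup only; not the actual TestHBaseWalOnEC code.
    public class MiniClusterSetupSketch {
      static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // Starts a mini DFS cluster, ZooKeeper, one master and three region
        // servers, and returns once the cluster is up; the "Finished waiting
        // on RegionServer count=3" and "Master has completed initialization"
        // messages above mark that point in the log.
        UTIL.startMiniCluster(3);
      }
    }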
2024-11-09T11:52:08,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502349d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T11:52:08,579 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3264b4bbda9f,34511,-1 for getting cluster id 2024-11-09T11:52:08,580 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T11:52:08,581 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '99f39ece-0630-4a77-932c-5101d68181ca' 2024-11-09T11:52:08,582 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T11:52:08,582 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "99f39ece-0630-4a77-932c-5101d68181ca" 2024-11-09T11:52:08,582 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7df0ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T11:52:08,583 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3264b4bbda9f,34511,-1] 2024-11-09T11:52:08,583 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T11:52:08,583 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:08,585 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-09T11:52:08,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a80bb54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T11:52:08,587 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T11:52:08,590 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3264b4bbda9f,43811,1731153127117, seqNum=-1] 2024-11-09T11:52:08,591 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T11:52:08,593 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45658, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T11:52:08,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:08,597 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T11:52:08,598 DEBUG 
[RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:08,598 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2075e3a6 2024-11-09T11:52:08,599 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T11:52:08,600 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45552, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T11:52:08,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T11:52:08,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T11:52:08,605 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T11:52:08,605 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:08,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-09T11:52:08,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T11:52:08,607 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T11:52:08,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741837_1013 (size=392) 2024-11-09T11:52:08,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741837_1013 (size=392) 2024-11-09T11:52:08,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741837_1013 (size=392) 2024-11-09T11:52:08,619 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e19829f75b449b790784d5d5f82419ed, NAME => 'TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be 2024-11-09T11:52:08,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741838_1014 (size=51) 2024-11-09T11:52:08,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741838_1014 (size=51) 2024-11-09T11:52:08,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741838_1014 (size=51) 2024-11-09T11:52:08,629 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:08,629 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing e19829f75b449b790784d5d5f82419ed, disabling compactions & flushes 2024-11-09T11:52:08,629 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:08,629 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:08,629 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. after waiting 0 ms 2024-11-09T11:52:08,629 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:08,629 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:08,630 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for e19829f75b449b790784d5d5f82419ed: Waiting for close lock at 1731153128629Disabling compacts and flushes for region at 1731153128629Disabling writes for close at 1731153128629Writing region close event to WAL at 1731153128629Closed at 1731153128629 2024-11-09T11:52:08,631 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T11:52:08,632 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731153128631"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731153128631"}]},"ts":"1731153128631"} 2024-11-09T11:52:08,634 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-09T11:52:08,636 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T11:52:08,636 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731153128636"}]},"ts":"1731153128636"} 2024-11-09T11:52:08,639 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T11:52:08,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3264b4bbda9f=0} racks are {/default-rack=0} 2024-11-09T11:52:08,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T11:52:08,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T11:52:08,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T11:52:08,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T11:52:08,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T11:52:08,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T11:52:08,640 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T11:52:08,640 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T11:52:08,640 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T11:52:08,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T11:52:08,640 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e19829f75b449b790784d5d5f82419ed, ASSIGN}] 2024-11-09T11:52:08,642 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e19829f75b449b790784d5d5f82419ed, ASSIGN 2024-11-09T11:52:08,644 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e19829f75b449b790784d5d5f82419ed, ASSIGN; state=OFFLINE, location=3264b4bbda9f,44011,1731153127155; forceNewPlan=false, retain=false 2024-11-09T11:52:08,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T11:52:08,794 INFO [3264b4bbda9f:34511 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-09T11:52:08,795 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e19829f75b449b790784d5d5f82419ed, regionState=OPENING, regionLocation=3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:08,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e19829f75b449b790784d5d5f82419ed, ASSIGN because future has completed 2024-11-09T11:52:08,799 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e19829f75b449b790784d5d5f82419ed, server=3264b4bbda9f,44011,1731153127155}] 2024-11-09T11:52:08,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T11:52:08,953 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T11:52:08,956 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57051, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T11:52:08,966 INFO [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:08,966 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e19829f75b449b790784d5d5f82419ed, NAME => 'TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed.', STARTKEY => '', ENDKEY => ''} 2024-11-09T11:52:08,967 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,967 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T11:52:08,967 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,967 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,969 INFO [StoreOpener-e19829f75b449b790784d5d5f82419ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,971 INFO [StoreOpener-e19829f75b449b790784d5d5f82419ed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e19829f75b449b790784d5d5f82419ed columnFamilyName cf 2024-11-09T11:52:08,971 DEBUG [StoreOpener-e19829f75b449b790784d5d5f82419ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T11:52:08,972 INFO [StoreOpener-e19829f75b449b790784d5d5f82419ed-1 {}] regionserver.HStore(327): Store=e19829f75b449b790784d5d5f82419ed/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T11:52:08,972 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,973 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/default/TestHBaseWalOnEC/e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,973 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/default/TestHBaseWalOnEC/e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,974 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,974 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,975 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,978 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/default/TestHBaseWalOnEC/e19829f75b449b790784d5d5f82419ed/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T11:52:08,979 INFO [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e19829f75b449b790784d5d5f82419ed; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74044915, jitterRate=0.10335521399974823}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T11:52:08,979 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:08,980 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e19829f75b449b790784d5d5f82419ed: Running coprocessor pre-open hook at 1731153128967Writing region info on filesystem at 1731153128967Initializing all the Stores at 1731153128969 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731153128969Cleaning up temporary data from old regions at 1731153128974 (+5 ms)Running coprocessor post-open hooks at 1731153128979 (+5 ms)Region opened successfully at 1731153128980 (+1 ms) 2024-11-09T11:52:08,981 INFO [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed., pid=6, masterSystemTime=1731153128952 2024-11-09T11:52:08,984 DEBUG [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:08,984 INFO [RS_OPEN_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:08,986 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e19829f75b449b790784d5d5f82419ed, regionState=OPEN, openSeqNum=2, regionLocation=3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:08,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e19829f75b449b790784d5d5f82419ed, server=3264b4bbda9f,44011,1731153127155 because future has completed 2024-11-09T11:52:08,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T11:52:08,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e19829f75b449b790784d5d5f82419ed, server=3264b4bbda9f,44011,1731153127155 in 192 msec 2024-11-09T11:52:08,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T11:52:08,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e19829f75b449b790784d5d5f82419ed, ASSIGN in 355 msec 2024-11-09T11:52:09,001 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T11:52:09,001 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731153129001"}]},"ts":"1731153129001"} 2024-11-09T11:52:09,004 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T11:52:09,006 INFO [PEWorker-2 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T11:52:09,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 405 msec 2024-11-09T11:52:09,109 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-09T11:52:09,109 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-09T11:52:09,112 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T11:52:09,112 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-09T11:52:09,112 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-09T11:52:09,112 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-09T11:52:09,114 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-11-09T11:52:09,114 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-11-09T11:52:09,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T11:52:09,240 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T11:52:09,240 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T11:52:09,240 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T11:52:09,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T11:52:09,244 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T11:52:09,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
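Procedure pid=4 above is the server side of a synchronous createTable call: the master stores the CreateTableProcedure and the client polls "Checking to see if procedure is done pid=4" until it reports SUCCESS. A client-side sketch that would issue an equivalent request is shown below; it is illustrative only, the Connection is assumed to come from the test utility (e.g. HBaseTestingUtil.getConnection()), and it sets just the family 'cf' and REGION_REPLICATION => '1' while the descriptor in the log additionally pins attributes such as BLOOMFILTER => 'NONE'.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Illustrative helper; not the actual test code.
    public final class CreateTableSketch {
      public static void createTestTable(Connection conn) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            // Single column family 'cf'; other attributes are left at defaults here.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            // Matches REGION_REPLICATION => '1' in the logged request.
            .setRegionReplication(1)
            .build();
        try (Admin admin = conn.getAdmin()) {
          // Blocks until the CreateTableProcedure finishes, i.e. until the
          // "Finished pid=4, state=SUCCESS ... CreateTableProcedure" line above.
          admin.createTable(desc);
        }
      }
    }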
2024-11-09T11:52:09,247 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed., hostname=3264b4bbda9f,44011,1731153127155, seqNum=2] 2024-11-09T11:52:09,247 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T11:52:09,249 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44058, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T11:52:09,252 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-09T11:52:09,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T11:52:09,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T11:52:09,256 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T11:52:09,257 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T11:52:09,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T11:52:09,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T11:52:09,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44011 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T11:52:09,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 
2024-11-09T11:52:09,414 INFO [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing e19829f75b449b790784d5d5f82419ed 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T11:52:09,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/default/TestHBaseWalOnEC/e19829f75b449b790784d5d5f82419ed/.tmp/cf/fb973d57c4ae487cb98cd58b630c3e48 is 36, key is row/cf:cq/1731153129250/Put/seqid=0 2024-11-09T11:52:09,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741839_1015 (size=4787) 2024-11-09T11:52:09,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741839_1015 (size=4787) 2024-11-09T11:52:09,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741839_1015 (size=4787) 2024-11-09T11:52:09,447 INFO [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/default/TestHBaseWalOnEC/e19829f75b449b790784d5d5f82419ed/.tmp/cf/fb973d57c4ae487cb98cd58b630c3e48 2024-11-09T11:52:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/default/TestHBaseWalOnEC/e19829f75b449b790784d5d5f82419ed/.tmp/cf/fb973d57c4ae487cb98cd58b630c3e48 as hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/default/TestHBaseWalOnEC/e19829f75b449b790784d5d5f82419ed/cf/fb973d57c4ae487cb98cd58b630c3e48 2024-11-09T11:52:09,466 INFO [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/default/TestHBaseWalOnEC/e19829f75b449b790784d5d5f82419ed/cf/fb973d57c4ae487cb98cd58b630c3e48, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T11:52:09,468 INFO [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for e19829f75b449b790784d5d5f82419ed in 55ms, sequenceid=5, compaction requested=false 2024-11-09T11:52:09,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for e19829f75b449b790784d5d5f82419ed: 2024-11-09T11:52:09,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 
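The flush above (pid=7 with subprocedure pid=8) follows a single write: the cell logged by HFileWriterImpl is row/cf:cq, and the 32 B memstore drains into one 4.7 K HFile. Driven from the client, that sequence is roughly one Put followed by Admin.flush, as in the sketch below; names are illustrative, the Connection is again assumed to come from the test utility, and the value bytes are a placeholder since the log only shows the key.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative helper; not the actual test code.
    public final class PutAndFlushSketch {
      public static void putAndFlush(Connection conn) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
          // One cell: row key "row", family "cf", qualifier "cq", as in the
          // "key is row/cf:cq" line above; the value here is a placeholder.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Requests a table flush; in this run the client waits for the
          // FlushTableProcedure (pid=7) to complete, and the region server
          // writes the HFile under .tmp/cf and commits it, as the
          // DefaultStoreFlusher/HStore lines show.
          admin.flush(name);
        }
      }
    }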
2024-11-09T11:52:09,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3264b4bbda9f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T11:52:09,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T11:52:09,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T11:52:09,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 213 msec 2024-11-09T11:52:09,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 223 msec 2024-11-09T11:52:09,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T11:52:09,570 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T11:52:09,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T11:52:09,580 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T11:52:09,581 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:09,581 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:09,581 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:09,581 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-09T11:52:09,582 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T11:52:09,582 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1122673142, stopped=false 2024-11-09T11:52:09,582 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3264b4bbda9f,34511,1731153126917 2024-11-09T11:52:09,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:09,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:09,663 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:09,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T11:52:09,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:09,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, 
quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:09,663 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T11:52:09,663 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:09,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:09,664 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T11:52:09,664 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:09,664 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:09,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:09,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:09,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:09,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T11:52:09,665 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3264b4bbda9f,37851,1731153127079' ***** 2024-11-09T11:52:09,665 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T11:52:09,665 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3264b4bbda9f,43811,1731153127117' ***** 2024-11-09T11:52:09,665 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T11:52:09,666 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3264b4bbda9f,44011,1731153127155' ***** 2024-11-09T11:52:09,666 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T11:52:09,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T11:52:09,666 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T11:52:09,666 INFO [RS:0;3264b4bbda9f:37851 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T11:52:09,666 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T11:52:09,666 INFO [RS:0;3264b4bbda9f:37851 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
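The stack traces above show the teardown path: TestHBaseWalOnEC.tearDown reaches HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection, asks the master for a cluster shutdown, and leads to the STOPPING messages on all three region servers. A matching JUnit 4 teardown is sketched below; only the shutdownMiniCluster call is taken from the stack trace, while the @After annotation (inferred from the RunAfters frame around the test method) and the field name are assumptions, and the UTIL field mirrors the one in the earlier setup sketch.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    // Illustrative teardown only; not the actual TestHBaseWalOnEC code.
    public class TeardownSketch {
      static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the cached cluster connection (the "Connection has been
        // closed by Time-limited test" lines), requests cluster shutdown from
        // the master, then stops ZooKeeper and the mini DFS cluster.
        UTIL.shutdownMiniCluster();
      }
    }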
2024-11-09T11:52:09,666 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T11:52:09,666 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(959): stopping server 3264b4bbda9f,37851,1731153127079 2024-11-09T11:52:09,666 INFO [RS:1;3264b4bbda9f:43811 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T11:52:09,666 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T11:52:09,666 INFO [RS:1;3264b4bbda9f:43811 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T11:52:09,666 INFO [RS:2;3264b4bbda9f:44011 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T11:52:09,666 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T11:52:09,666 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(959): stopping server 3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:09,667 INFO [RS:0;3264b4bbda9f:37851 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3264b4bbda9f:37851. 2024-11-09T11:52:09,667 INFO [RS:2;3264b4bbda9f:44011 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T11:52:09,667 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T11:52:09,667 INFO [RS:1;3264b4bbda9f:43811 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3264b4bbda9f:43811. 2024-11-09T11:52:09,667 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(3091): Received CLOSE for e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:09,667 DEBUG [RS:0;3264b4bbda9f:37851 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:09,667 DEBUG [RS:1;3264b4bbda9f:43811 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:09,667 DEBUG [RS:0;3264b4bbda9f:37851 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:09,667 DEBUG [RS:1;3264b4bbda9f:43811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:09,667 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T11:52:09,667 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(976): stopping server 3264b4bbda9f,37851,1731153127079; all regions closed. 2024-11-09T11:52:09,667 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T11:52:09,666 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T11:52:09,667 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T11:52:09,667 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T11:52:09,667 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(959): stopping server 3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:09,668 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T11:52:09,668 INFO [RS:2;3264b4bbda9f:44011 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;3264b4bbda9f:44011. 2024-11-09T11:52:09,668 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e19829f75b449b790784d5d5f82419ed, disabling compactions & flushes 2024-11-09T11:52:09,668 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T11:52:09,668 DEBUG [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-09T11:52:09,668 INFO [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 
2024-11-09T11:52:09,668 DEBUG [RS:2;3264b4bbda9f:44011 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T11:52:09,668 DEBUG [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-09T11:52:09,668 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:09,668 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T11:52:09,668 DEBUG [RS:2;3264b4bbda9f:44011 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:09,668 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T11:52:09,668 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. after waiting 0 ms 2024-11-09T11:52:09,668 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,668 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T11:52:09,668 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T11:52:09,668 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 
2024-11-09T11:52:09,668 DEBUG [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(1325): Online Regions={e19829f75b449b790784d5d5f82419ed=TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed.} 2024-11-09T11:52:09,669 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T11:52:09,669 DEBUG [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(1351): Waiting on e19829f75b449b790784d5d5f82419ed 2024-11-09T11:52:09,669 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T11:52:09,669 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,669 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,669 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T11:52:09,669 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,669 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741833_1009 (size=93) 2024-11-09T11:52:09,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741833_1009 (size=93) 2024-11-09T11:52:09,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741833_1009 (size=93) 2024-11-09T11:52:09,678 DEBUG [RS:0;3264b4bbda9f:37851 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/oldWALs 2024-11-09T11:52:09,678 INFO [RS:0;3264b4bbda9f:37851 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3264b4bbda9f%2C37851%2C1731153127079:(num 1731153128039) 2024-11-09T11:52:09,678 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/default/TestHBaseWalOnEC/e19829f75b449b790784d5d5f82419ed/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T11:52:09,678 DEBUG [RS:0;3264b4bbda9f:37851 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:09,678 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:09,679 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T11:52:09,679 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.ChoreService(370): Chore service for: regionserver/3264b4bbda9f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T11:52:09,679 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T11:52:09,679 INFO [regionserver/3264b4bbda9f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T11:52:09,679 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T11:52:09,679 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T11:52:09,679 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T11:52:09,679 INFO [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:09,679 INFO [RS:0;3264b4bbda9f:37851 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37851 2024-11-09T11:52:09,679 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e19829f75b449b790784d5d5f82419ed: Waiting for close lock at 1731153129668Running coprocessor pre-close hooks at 1731153129668Disabling compacts and flushes for region at 1731153129668Disabling writes for close at 1731153129668Writing region close event to WAL at 1731153129673 (+5 ms)Running coprocessor post-close hooks at 1731153129679 (+6 ms)Closed at 1731153129679 2024-11-09T11:52:09,680 DEBUG [RS_CLOSE_REGION-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed. 2024-11-09T11:52:09,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T11:52:09,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3264b4bbda9f,37851,1731153127079 2024-11-09T11:52:09,687 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T11:52:09,687 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$377/0x00007fe7648f8748@51345c0d rejected from java.util.concurrent.ThreadPoolExecutor@550bd2f3[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-09T11:52:09,693 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/.tmp/info/3a8cf2a4ec2b4c689617807dfe487ba5 is 153, key is TestHBaseWalOnEC,,1731153128601.e19829f75b449b790784d5d5f82419ed./info:regioninfo/1731153128986/Put/seqid=0 2024-11-09T11:52:09,694 WARN [IPC Server handler 1 on default port 44375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T11:52:09,694 WARN [IPC Server handler 1 on default port 44375 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T11:52:09,694 WARN [IPC Server handler 1 on default port 44375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T11:52:09,698 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3264b4bbda9f,37851,1731153127079] 2024-11-09T11:52:09,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741840_1016 (size=6637) 2024-11-09T11:52:09,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741840_1016 (size=6637) 2024-11-09T11:52:09,700 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/.tmp/info/3a8cf2a4ec2b4c689617807dfe487ba5 2024-11-09T11:52:09,702 INFO [regionserver/3264b4bbda9f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:09,706 INFO [regionserver/3264b4bbda9f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:09,708 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3264b4bbda9f,37851,1731153127079 already deleted, retry=false 
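The RejectedExecutionException in the watcher callback above is ordinary JDK behaviour rather than an HBase-specific failure: the ZooKeeper event thread hands a tracing task to a single-thread pool that is already shutting down as part of region server teardown, and ThreadPoolExecutor's default AbortPolicy refuses it. A minimal, self-contained Java sketch of that mechanism (illustrative only; the pool and task here are stand-ins, not HBase code):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class RejectedAfterShutdown {
    public static void main(String[] args) {
        // Single-threaded executor, analogous to the pool the ZK watcher submits to.
        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.execute(() -> System.out.println("task submitted before shutdown runs fine"));

        // Teardown begins: the pool stops accepting new work.
        pool.shutdown();

        try {
            // A late callback (e.g. a watcher event arriving during shutdown)
            // is refused by the default AbortPolicy with RejectedExecutionException.
            pool.execute(() -> System.out.println("never runs"));
        } catch (RejectedExecutionException e) {
            System.out.println("rejected, as in the log above: " + e.getMessage());
        }
    }
}
```

The same rejection appears again further down when the second region server closes its ZooKeeper watcher.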
2024-11-09T11:52:09,708 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3264b4bbda9f,37851,1731153127079 expired; onlineServers=2 2024-11-09T11:52:09,711 INFO [regionserver/3264b4bbda9f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:09,722 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/.tmp/ns/77c6ff8d2d6a4f908b75025a3fc92566 is 43, key is default/ns:d/1731153128473/Put/seqid=0 2024-11-09T11:52:09,724 WARN [IPC Server handler 0 on default port 44375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T11:52:09,724 WARN [IPC Server handler 0 on default port 44375 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T11:52:09,724 WARN [IPC Server handler 0 on default port 44375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T11:52:09,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741841_1017 (size=5153) 2024-11-09T11:52:09,729 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/.tmp/ns/77c6ff8d2d6a4f908b75025a3fc92566 2024-11-09T11:52:09,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741841_1017 (size=5153) 2024-11-09T11:52:09,754 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/.tmp/table/a0535242b2f341a6a8ba964986be3d67 is 52, key is TestHBaseWalOnEC/table:state/1731153129001/Put/seqid=0 2024-11-09T11:52:09,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741842_1018 (size=5249) 2024-11-09T11:52:09,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741842_1018 (size=5249) 2024-11-09T11:52:09,761 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741842_1018 (size=5249) 2024-11-09T11:52:09,761 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/.tmp/table/a0535242b2f341a6a8ba964986be3d67 2024-11-09T11:52:09,769 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/.tmp/info/3a8cf2a4ec2b4c689617807dfe487ba5 as hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/info/3a8cf2a4ec2b4c689617807dfe487ba5 2024-11-09T11:52:09,777 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/info/3a8cf2a4ec2b4c689617807dfe487ba5, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T11:52:09,779 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/.tmp/ns/77c6ff8d2d6a4f908b75025a3fc92566 as hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/ns/77c6ff8d2d6a4f908b75025a3fc92566 2024-11-09T11:52:09,786 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/ns/77c6ff8d2d6a4f908b75025a3fc92566, entries=2, sequenceid=11, filesize=5.0 K 2024-11-09T11:52:09,787 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/.tmp/table/a0535242b2f341a6a8ba964986be3d67 as hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/table/a0535242b2f341a6a8ba964986be3d67 2024-11-09T11:52:09,794 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/table/a0535242b2f341a6a8ba964986be3d67, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T11:52:09,795 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=11, compaction requested=false 2024-11-09T11:52:09,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:09,798 INFO [RS:0;3264b4bbda9f:37851 {}] hbase.HBaseServerBase(486): Close table descriptors 
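The "Committing ….tmp/info/… as …/info/…" lines above show the flush commit pattern: the flushed store file is written under the region's .tmp directory first and then moved into the column-family directory, so readers only ever see complete files. A minimal sketch of that write-then-rename step using plain Hadoop FileSystem calls (placeholder paths and default filesystem; not the actual HRegionFileSystem code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameCommit {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder filesystem; in the test above this is hdfs://localhost:44375.
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/hbase/meta/1588230740/.tmp/info/example-flush-file");
        Path finalFile = new Path("/data/hbase/meta/1588230740/info/example-flush-file");

        // 1. Write the flushed data under .tmp (stand-in for the HFile writer).
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.writeBytes("flushed cells would go here");
        }

        // 2. Commit by renaming into the store directory; this is the step the
        //    "Committing ... as ..." log lines record.
        fs.mkdirs(finalFile.getParent());
        if (!fs.rename(tmpFile, finalFile)) {
            throw new java.io.IOException("commit failed for " + tmpFile);
        }
    }
}
```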
2024-11-09T11:52:09,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37851-0x1011f70619d0001, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:09,798 INFO [RS:0;3264b4bbda9f:37851 {}] regionserver.HRegionServer(1031): Exiting; stopping=3264b4bbda9f,37851,1731153127079; zookeeper connection closed. 2024-11-09T11:52:09,798 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2c0498f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2c0498f 2024-11-09T11:52:09,801 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T11:52:09,802 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T11:52:09,802 INFO [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T11:52:09,802 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731153129668Running coprocessor pre-close hooks at 1731153129668Disabling compacts and flushes for region at 1731153129668Disabling writes for close at 1731153129669 (+1 ms)Obtaining lock to block concurrent updates at 1731153129669Preparing flush snapshotting stores in 1588230740 at 1731153129669Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731153129669Flushing stores of hbase:meta,,1.1588230740 at 1731153129671 (+2 ms)Flushing 1588230740/info: creating writer at 1731153129671Flushing 1588230740/info: appending metadata at 1731153129692 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731153129692Flushing 1588230740/ns: creating writer at 1731153129706 (+14 ms)Flushing 1588230740/ns: appending metadata at 1731153129722 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731153129722Flushing 1588230740/table: creating writer at 1731153129737 (+15 ms)Flushing 1588230740/table: appending metadata at 1731153129753 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731153129753Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55a5ef44: reopening flushed file at 1731153129768 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b32eb88: reopening flushed file at 1731153129778 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e9ede6b: reopening flushed file at 1731153129786 (+8 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=11, compaction requested=false at 1731153129796 (+10 ms)Writing region close event to WAL at 1731153129797 (+1 ms)Running coprocessor post-close hooks at 1731153129802 (+5 ms)Closed at 1731153129802 2024-11-09T11:52:09,802 DEBUG [RS_CLOSE_META-regionserver/3264b4bbda9f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T11:52:09,868 INFO 
[RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(976): stopping server 3264b4bbda9f,43811,1731153127117; all regions closed. 2024-11-09T11:52:09,869 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(976): stopping server 3264b4bbda9f,44011,1731153127155; all regions closed. 2024-11-09T11:52:09,869 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,869 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,869 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,869 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,869 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,869 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,869 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,870 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,870 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,870 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741835_1011 (size=1298) 2024-11-09T11:52:09,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741835_1011 (size=1298) 2024-11-09T11:52:09,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741836_1012 (size=2751) 2024-11-09T11:52:09,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741836_1012 (size=2751) 2024-11-09T11:52:09,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741836_1012 (size=2751) 2024-11-09T11:52:09,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741835_1011 (size=1298) 2024-11-09T11:52:09,876 DEBUG [RS:2;3264b4bbda9f:44011 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/oldWALs 2024-11-09T11:52:09,876 INFO [RS:2;3264b4bbda9f:44011 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3264b4bbda9f%2C44011%2C1731153127155:(num 1731153128051) 2024-11-09T11:52:09,876 DEBUG [RS:1;3264b4bbda9f:43811 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/oldWALs 2024-11-09T11:52:09,876 DEBUG [RS:2;3264b4bbda9f:44011 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:09,876 INFO [RS:1;3264b4bbda9f:43811 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3264b4bbda9f%2C43811%2C1731153127117.meta:.meta(num 1731153128390) 2024-11-09T11:52:09,876 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:09,876 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T11:52:09,876 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.ChoreService(370): Chore service for: regionserver/3264b4bbda9f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on 
shutdown 2024-11-09T11:52:09,876 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,876 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T11:52:09,876 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,876 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T11:52:09,876 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T11:52:09,876 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,876 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T11:52:09,876 INFO [regionserver/3264b4bbda9f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T11:52:09,877 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,877 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:09,877 INFO [RS:2;3264b4bbda9f:44011 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44011 2024-11-09T11:52:09,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741834_1010 (size=93) 2024-11-09T11:52:09,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741834_1010 (size=93) 2024-11-09T11:52:09,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741834_1010 (size=93) 2024-11-09T11:52:09,882 DEBUG [RS:1;3264b4bbda9f:43811 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/oldWALs 2024-11-09T11:52:09,882 INFO [RS:1;3264b4bbda9f:43811 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3264b4bbda9f%2C43811%2C1731153127117:(num 1731153128051) 2024-11-09T11:52:09,882 DEBUG [RS:1;3264b4bbda9f:43811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T11:52:09,882 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T11:52:09,883 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T11:52:09,883 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.ChoreService(370): Chore service for: regionserver/3264b4bbda9f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T11:52:09,883 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T11:52:09,883 INFO [regionserver/3264b4bbda9f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T11:52:09,883 INFO [RS:1;3264b4bbda9f:43811 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43811 2024-11-09T11:52:09,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T11:52:09,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3264b4bbda9f,44011,1731153127155 2024-11-09T11:52:09,887 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T11:52:09,887 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$377/0x00007fe7648f8748@dacf23e rejected from java.util.concurrent.ThreadPoolExecutor@9937753[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-09T11:52:09,898 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3264b4bbda9f,43811,1731153127117 2024-11-09T11:52:09,898 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T11:52:09,908 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3264b4bbda9f,44011,1731153127155] 2024-11-09T11:52:09,930 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3264b4bbda9f,44011,1731153127155 already deleted, retry=false 2024-11-09T11:52:09,930 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3264b4bbda9f,44011,1731153127155 expired; onlineServers=1 2024-11-09T11:52:09,930 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3264b4bbda9f,43811,1731153127117] 2024-11-09T11:52:09,940 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3264b4bbda9f,43811,1731153127117 already deleted, retry=false 2024-11-09T11:52:09,940 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3264b4bbda9f,43811,1731153127117 expired; onlineServers=0 2024-11-09T11:52:09,940 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3264b4bbda9f,34511,1731153126917' ***** 2024-11-09T11:52:09,940 INFO [RegionServerTracker-0 {}] 
master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T11:52:09,940 INFO [M:0;3264b4bbda9f:34511 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T11:52:09,940 INFO [M:0;3264b4bbda9f:34511 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T11:52:09,941 DEBUG [M:0;3264b4bbda9f:34511 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T11:52:09,941 DEBUG [M:0;3264b4bbda9f:34511 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T11:52:09,941 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.large.0-1731153127730 {}] cleaner.HFileCleaner(306): Exit Thread[master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.large.0-1731153127730,5,FailOnTimeoutGroup] 2024-11-09T11:52:09,941 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-09T11:52:09,941 DEBUG [master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.small.0-1731153127731 {}] cleaner.HFileCleaner(306): Exit Thread[master/3264b4bbda9f:0:becomeActiveMaster-HFileCleaner.small.0-1731153127731,5,FailOnTimeoutGroup] 2024-11-09T11:52:09,941 INFO [M:0;3264b4bbda9f:34511 {}] hbase.ChoreService(370): Chore service for: master/3264b4bbda9f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T11:52:09,941 INFO [M:0;3264b4bbda9f:34511 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T11:52:09,942 DEBUG [M:0;3264b4bbda9f:34511 {}] master.HMaster(1795): Stopping service threads 2024-11-09T11:52:09,942 INFO [M:0;3264b4bbda9f:34511 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T11:52:09,942 INFO [M:0;3264b4bbda9f:34511 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T11:52:09,942 INFO [M:0;3264b4bbda9f:34511 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T11:52:09,943 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
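The "RegionServer ephemeral node deleted, processing expiration" sequence earlier in the shutdown relies on plain ZooKeeper semantics: each region server registers an ephemeral znode under /hbase/rs, and closing its session removes the node and fires a watch on the master. A minimal sketch of that behaviour with the raw ZooKeeper client (hypothetical paths and connection string, assumes a local ZooKeeper server; not the HBase RegionServerTracker code):

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeDemo {
    public static void main(String[] args) throws Exception {
        // Stand-in for the test's quorum at 127.0.0.1:50195.
        ZooKeeper master = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        if (master.exists("/rs", false) == null) {
            master.create("/rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }

        // "Region server" session registers itself with an ephemeral child znode.
        ZooKeeper rs = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        rs.create("/rs/server-1", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // "Master" session watches the parent for membership changes.
        master.getChildren("/rs", event ->
                System.out.println("watch fired: " + event.getType() + " on " + event.getPath()));

        // Closing the region server session deletes its ephemeral node and fires a
        // NodeChildrenChanged watch on /rs, the same pattern the tracker reacts to above.
        rs.close();
        Thread.sleep(1_000);
        master.close();
    }
}
```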
2024-11-09T11:52:09,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T11:52:09,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T11:52:09,951 DEBUG [M:0;3264b4bbda9f:34511 {}] zookeeper.ZKUtil(347): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T11:52:09,951 WARN [M:0;3264b4bbda9f:34511 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T11:52:09,952 INFO [M:0;3264b4bbda9f:34511 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/.lastflushedseqids 2024-11-09T11:52:09,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741843_1019 (size=127) 2024-11-09T11:52:09,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741843_1019 (size=127) 2024-11-09T11:52:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741843_1019 (size=127) 2024-11-09T11:52:09,962 INFO [M:0;3264b4bbda9f:34511 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T11:52:09,962 INFO [M:0;3264b4bbda9f:34511 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T11:52:09,962 DEBUG [M:0;3264b4bbda9f:34511 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T11:52:09,962 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:09,962 DEBUG [M:0;3264b4bbda9f:34511 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:09,962 DEBUG [M:0;3264b4bbda9f:34511 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T11:52:09,963 DEBUG [M:0;3264b4bbda9f:34511 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T11:52:09,963 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-11-09T11:52:09,980 DEBUG [M:0;3264b4bbda9f:34511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f6a7b5bd85cb4c4d8c2ba264a5e90a48 is 82, key is hbase:meta,,1/info:regioninfo/1731153128423/Put/seqid=0 2024-11-09T11:52:09,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741844_1020 (size=5672) 2024-11-09T11:52:09,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741844_1020 (size=5672) 2024-11-09T11:52:09,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741844_1020 (size=5672) 2024-11-09T11:52:09,989 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f6a7b5bd85cb4c4d8c2ba264a5e90a48 2024-11-09T11:52:10,008 DEBUG [M:0;3264b4bbda9f:34511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00381aa3e99c4cfc8d169e2a295fc1bd is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731153129008/Put/seqid=0 2024-11-09T11:52:10,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:10,009 INFO [RS:2;3264b4bbda9f:44011 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T11:52:10,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44011-0x1011f70619d0003, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:10,009 INFO [RS:2;3264b4bbda9f:44011 {}] regionserver.HRegionServer(1031): Exiting; stopping=3264b4bbda9f,44011,1731153127155; zookeeper connection closed. 
2024-11-09T11:52:10,009 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4a428eba {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4a428eba 2024-11-09T11:52:10,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741845_1021 (size=6439) 2024-11-09T11:52:10,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741845_1021 (size=6439) 2024-11-09T11:52:10,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741845_1021 (size=6439) 2024-11-09T11:52:10,016 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00381aa3e99c4cfc8d169e2a295fc1bd 2024-11-09T11:52:10,019 INFO [RS:1;3264b4bbda9f:43811 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T11:52:10,019 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:10,019 INFO [RS:1;3264b4bbda9f:43811 {}] regionserver.HRegionServer(1031): Exiting; stopping=3264b4bbda9f,43811,1731153127117; zookeeper connection closed. 2024-11-09T11:52:10,019 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1011f70619d0002, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:10,019 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@c7b5429 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@c7b5429 2024-11-09T11:52:10,020 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T11:52:10,039 DEBUG [M:0;3264b4bbda9f:34511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45c535e0caa74324b14b11c1bf7cfdaa is 69, key is 3264b4bbda9f,37851,1731153127079/rs:state/1731153127838/Put/seqid=0 2024-11-09T11:52:10,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741846_1022 (size=5294) 2024-11-09T11:52:10,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741846_1022 (size=5294) 2024-11-09T11:52:10,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741846_1022 (size=5294) 2024-11-09T11:52:10,046 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45c535e0caa74324b14b11c1bf7cfdaa 2024-11-09T11:52:10,054 DEBUG [M:0;3264b4bbda9f:34511 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f6a7b5bd85cb4c4d8c2ba264a5e90a48 as hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f6a7b5bd85cb4c4d8c2ba264a5e90a48 2024-11-09T11:52:10,062 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f6a7b5bd85cb4c4d8c2ba264a5e90a48, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T11:52:10,064 DEBUG [M:0;3264b4bbda9f:34511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00381aa3e99c4cfc8d169e2a295fc1bd as hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/00381aa3e99c4cfc8d169e2a295fc1bd 2024-11-09T11:52:10,071 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/00381aa3e99c4cfc8d169e2a295fc1bd, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T11:52:10,072 DEBUG [M:0;3264b4bbda9f:34511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45c535e0caa74324b14b11c1bf7cfdaa as hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/45c535e0caa74324b14b11c1bf7cfdaa 2024-11-09T11:52:10,079 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44375/user/jenkins/test-data/818291a1-a4aa-fe31-c52f-e1fecf12f2be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/45c535e0caa74324b14b11c1bf7cfdaa, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T11:52:10,080 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=72, compaction requested=false 2024-11-09T11:52:10,082 INFO [M:0;3264b4bbda9f:34511 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T11:52:10,082 DEBUG [M:0;3264b4bbda9f:34511 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731153129962Disabling compacts and flushes for region at 1731153129962Disabling writes for close at 1731153129962Obtaining lock to block concurrent updates at 1731153129963 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731153129963Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1731153129963Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731153129964 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731153129964Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731153129980 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731153129980Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731153129994 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731153130008 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731153130008Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731153130023 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731153130038 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731153130039 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a4565a2: reopening flushed file at 1731153130053 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19ef9109: reopening flushed file at 1731153130063 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d9e6344: reopening flushed file at 1731153130071 (+8 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=72, compaction requested=false at 1731153130080 (+9 ms)Writing region close event to WAL at 1731153130082 (+2 ms)Closed at 1731153130082 2024-11-09T11:52:10,083 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:10,083 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:10,083 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:10,083 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:10,083 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T11:52:10,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43611 is added to blk_1073741830_1006 (size=32674) 2024-11-09T11:52:10,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34609 is added to blk_1073741830_1006 (size=32674) 2024-11-09T11:52:10,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46067 is added to blk_1073741830_1006 (size=32674) 2024-11-09T11:52:10,087 INFO [M:0;3264b4bbda9f:34511 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-09T11:52:10,087 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T11:52:10,087 INFO [M:0;3264b4bbda9f:34511 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34511 2024-11-09T11:52:10,087 INFO [M:0;3264b4bbda9f:34511 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T11:52:10,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:10,198 INFO [M:0;3264b4bbda9f:34511 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T11:52:10,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34511-0x1011f70619d0000, quorum=127.0.0.1:50195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T11:52:10,202 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4038045d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:52:10,202 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b9e74e2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T11:52:10,202 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T11:52:10,203 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17444eb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T11:52:10,203 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@659fda5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.log.dir/,STOPPED} 2024-11-09T11:52:10,206 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T11:52:10,206 WARN [BP-1269958615-172.17.0.2-1731153124222 heartbeating to localhost/127.0.0.1:44375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T11:52:10,206 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T11:52:10,206 WARN [BP-1269958615-172.17.0.2-1731153124222 heartbeating to localhost/127.0.0.1:44375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1269958615-172.17.0.2-1731153124222 (Datanode Uuid 0a67e403-de3e-402e-8a69-025f76f986aa) service to localhost/127.0.0.1:44375 2024-11-09T11:52:10,207 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data5/current/BP-1269958615-172.17.0.2-1731153124222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:10,208 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data6/current/BP-1269958615-172.17.0.2-1731153124222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:10,208 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T11:52:10,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3373c13a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:52:10,213 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@619667{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T11:52:10,213 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T11:52:10,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e802acb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T11:52:10,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@143655f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.log.dir/,STOPPED} 2024-11-09T11:52:10,215 WARN [BP-1269958615-172.17.0.2-1731153124222 heartbeating to localhost/127.0.0.1:44375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T11:52:10,215 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T11:52:10,215 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T11:52:10,215 WARN [BP-1269958615-172.17.0.2-1731153124222 heartbeating to localhost/127.0.0.1:44375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1269958615-172.17.0.2-1731153124222 (Datanode Uuid 9e558314-2543-4f0c-bc67-28a1e4577c41) service to localhost/127.0.0.1:44375 2024-11-09T11:52:10,216 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data3/current/BP-1269958615-172.17.0.2-1731153124222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:10,216 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data4/current/BP-1269958615-172.17.0.2-1731153124222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:10,217 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T11:52:10,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f76b201{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T11:52:10,219 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@28829bc6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T11:52:10,219 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T11:52:10,219 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47fad3b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T11:52:10,219 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ae83c3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.log.dir/,STOPPED} 2024-11-09T11:52:10,220 WARN [BP-1269958615-172.17.0.2-1731153124222 heartbeating to localhost/127.0.0.1:44375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T11:52:10,220 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T11:52:10,220 WARN [BP-1269958615-172.17.0.2-1731153124222 heartbeating to localhost/127.0.0.1:44375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1269958615-172.17.0.2-1731153124222 (Datanode Uuid c3f16103-019c-437f-ba22-985aa1d189b7) service to localhost/127.0.0.1:44375 2024-11-09T11:52:10,220 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T11:52:10,221 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data1/current/BP-1269958615-172.17.0.2-1731153124222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:10,221 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/cluster_5d6f182f-f1ef-f355-dc48-e75a6dcd03e9/data/data2/current/BP-1269958615-172.17.0.2-1731153124222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T11:52:10,222 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T11:52:10,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5614c6c6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T11:52:10,227 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@236a8141{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T11:52:10,227 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T11:52:10,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@193f4fa8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T11:52:10,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3950f25b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b99e04a5-d65e-d09e-8df3-507509fe1b3f/hadoop.log.dir/,STOPPED} 2024-11-09T11:52:10,234 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-09T11:52:10,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-09T11:52:10,289 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=151 (was 90) - Thread LEAK? -, OpenFileDescriptor=518 (was 437) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=155 (was 134) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5838 (was 5862)
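The closing ResourceChecker line summarizes before/after counts (threads, open file descriptors, load average) once the mini cluster is down, to flag leaks. A minimal sketch of that kind of before/after thread check (an illustration of the idea only, not the HBase ResourceChecker utility):

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class SimpleResourceCheck {
    public static void main(String[] args) {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        int before = threads.getThreadCount();

        // Stand-in for running the test body; a lingering worker is simulated here.
        Thread leaked = new Thread(() -> {
            try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
        }, "leaked-worker");
        leaked.setDaemon(true);
        leaked.start();

        int after = threads.getThreadCount();

        // Same shape as the "Thread=151 (was 90) - Thread LEAK?" summary above.
        System.out.printf("Thread=%d (was %d)%s%n",
                after, before, after > before ? " - Thread LEAK?" : "");
    }
}
```

In the log the thread count rose from 90 to 151 across the test, which is why the summary prints the "Thread LEAK?" hint.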