2024-11-12 18:31:14,387 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-12 18:31:14,400 main DEBUG Took 0.010667 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-12 18:31:14,400 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-12 18:31:14,400 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-12 18:31:14,401 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-12 18:31:14,402 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,421 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-12 18:31:14,433 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,435 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,436 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,436 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,437 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,437 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,438 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,439 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,440 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,440 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,441 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,442 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,442 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,443 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-12 18:31:14,444 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,444 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,445 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,445 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,446 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,446 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,447 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,447 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,448 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,448 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:31:14,449 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,449 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-12 18:31:14,452 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:31:14,453 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-12 18:31:14,456 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-12 18:31:14,456 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-12 18:31:14,458 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-12 18:31:14,459 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-12 18:31:14,471 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-12 18:31:14,474 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-12 18:31:14,477 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-12 18:31:14,477 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-12 18:31:14,478 main DEBUG createAppenders(={Console}) 2024-11-12 18:31:14,479 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-12 18:31:14,479 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-12 18:31:14,479 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-12 18:31:14,480 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-12 18:31:14,481 main DEBUG OutputStream closed 2024-11-12 18:31:14,481 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-12 18:31:14,481 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-12 18:31:14,482 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-12 18:31:14,568 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-12 18:31:14,571 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-12 18:31:14,572 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-12 18:31:14,574 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-12 18:31:14,574 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-12 18:31:14,575 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-12 18:31:14,575 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-12 18:31:14,576 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-12 18:31:14,576 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-12 18:31:14,577 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-12 18:31:14,577 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-12 18:31:14,578 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-12 18:31:14,578 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-12 18:31:14,579 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-12 18:31:14,579 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-12 18:31:14,580 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-12 18:31:14,580 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-12 18:31:14,581 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-12 18:31:14,583 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-12 18:31:14,584 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-12 18:31:14,584 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-12 18:31:14,584 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-12T18:31:14,600 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-12 18:31:14,603 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-12 18:31:14,603 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-12T18:31:14,905 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90 2024-11-12T18:31:14,934 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf, deleteOnExit=true 2024-11-12T18:31:14,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/test.cache.data in system properties and HBase conf 2024-11-12T18:31:14,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T18:31:14,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir in system properties and HBase conf 2024-11-12T18:31:14,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T18:31:14,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T18:31:14,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T18:31:15,032 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-12T18:31:15,125 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-12T18:31:15,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:31:15,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:31:15,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T18:31:15,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:31:15,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T18:31:15,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T18:31:15,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:31:15,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:31:15,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T18:31:15,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/nfs.dump.dir in system properties and HBase conf 2024-11-12T18:31:15,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/java.io.tmpdir in system properties and HBase conf 2024-11-12T18:31:15,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:31:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T18:31:15,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T18:31:15,994 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-12T18:31:16,070 INFO [Time-limited test {}] log.Log(170): Logging initialized @2361ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-12T18:31:16,145 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:16,210 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:16,232 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:16,232 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:16,233 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:16,246 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:16,249 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:16,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:16,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58dbf239{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/java.io.tmpdir/jetty-localhost-45043-hadoop-hdfs-3_4_1-tests_jar-_-any-12770961613957375126/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:31:16,452 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:45043} 2024-11-12T18:31:16,452 INFO [Time-limited test {}] server.Server(415): Started @2744ms 2024-11-12T18:31:16,847 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:16,856 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:16,857 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:16,857 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:16,857 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:16,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:16,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:16,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65462677{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/java.io.tmpdir/jetty-localhost-35441-hadoop-hdfs-3_4_1-tests_jar-_-any-2237114589774161831/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:16,983 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:35441} 2024-11-12T18:31:16,983 INFO [Time-limited test {}] server.Server(415): Started @3275ms 2024-11-12T18:31:17,042 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:31:17,174 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:17,181 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:17,187 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:17,187 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:17,187 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:17,191 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:17,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:17,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@513cab2c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/java.io.tmpdir/jetty-localhost-36301-hadoop-hdfs-3_4_1-tests_jar-_-any-2301561221706760398/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:17,322 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:36301} 2024-11-12T18:31:17,322 INFO [Time-limited test {}] server.Server(415): Started @3614ms 2024-11-12T18:31:17,325 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:31:17,367 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:17,372 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:17,379 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:17,379 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:17,379 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:17,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:17,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:17,505 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data1/current/BP-1849397982-172.17.0.3-1731436275747/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:17,505 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data3/current/BP-1849397982-172.17.0.3-1731436275747/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:17,505 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data4/current/BP-1849397982-172.17.0.3-1731436275747/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:17,505 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data2/current/BP-1849397982-172.17.0.3-1731436275747/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:17,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@653e6301{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/java.io.tmpdir/jetty-localhost-46125-hadoop-hdfs-3_4_1-tests_jar-_-any-13979509989050341445/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:17,519 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@404caff2{HTTP/1.1, 
(http/1.1)}{localhost:46125} 2024-11-12T18:31:17,519 INFO [Time-limited test {}] server.Server(415): Started @3811ms 2024-11-12T18:31:17,522 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:31:17,555 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:31:17,556 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:31:17,637 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4d5a5c8d0f650303 with lease ID 0xf1e429304046cd50: Processing first storage report for DS-81d09389-2d6a-4795-bf4a-d478ee9792e2 from datanode DatanodeRegistration(127.0.0.1:33251, datanodeUuid=9648689b-98e9-4b66-827e-7a20ddee16d0, infoPort=40355, infoSecurePort=0, ipcPort=37467, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747) 2024-11-12T18:31:17,638 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data5/current/BP-1849397982-172.17.0.3-1731436275747/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:17,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4d5a5c8d0f650303 with lease ID 0xf1e429304046cd50: from storage DS-81d09389-2d6a-4795-bf4a-d478ee9792e2 node DatanodeRegistration(127.0.0.1:33251, datanodeUuid=9648689b-98e9-4b66-827e-7a20ddee16d0, infoPort=40355, infoSecurePort=0, ipcPort=37467, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-12T18:31:17,639 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45b1560c4f02a76c with lease ID 0xf1e429304046cd4f: Processing first storage report for DS-439aa30b-8e3c-49da-9fc9-da8fa02eec5e from datanode DatanodeRegistration(127.0.0.1:34479, datanodeUuid=759c677e-5a6f-4018-9a5f-a97b86fdd325, infoPort=40879, infoSecurePort=0, ipcPort=44217, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747) 2024-11-12T18:31:17,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45b1560c4f02a76c with lease ID 0xf1e429304046cd4f: from storage DS-439aa30b-8e3c-49da-9fc9-da8fa02eec5e node DatanodeRegistration(127.0.0.1:34479, datanodeUuid=759c677e-5a6f-4018-9a5f-a97b86fdd325, infoPort=40879, infoSecurePort=0, ipcPort=44217, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:17,640 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4d5a5c8d0f650303 with lease ID 0xf1e429304046cd50: Processing first storage report for DS-46bd0cf3-b720-47fe-a322-3ef0487e2358 from datanode DatanodeRegistration(127.0.0.1:33251, datanodeUuid=9648689b-98e9-4b66-827e-7a20ddee16d0, infoPort=40355, infoSecurePort=0, ipcPort=37467, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747) 2024-11-12T18:31:17,640 INFO [Block report processor {}] 
blockmanagement.BlockManager(2972): BLOCK* processReport 0x4d5a5c8d0f650303 with lease ID 0xf1e429304046cd50: from storage DS-46bd0cf3-b720-47fe-a322-3ef0487e2358 node DatanodeRegistration(127.0.0.1:33251, datanodeUuid=9648689b-98e9-4b66-827e-7a20ddee16d0, infoPort=40355, infoSecurePort=0, ipcPort=37467, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:31:17,640 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45b1560c4f02a76c with lease ID 0xf1e429304046cd4f: Processing first storage report for DS-3a15742d-831e-48a2-88df-f8ed58d5175e from datanode DatanodeRegistration(127.0.0.1:34479, datanodeUuid=759c677e-5a6f-4018-9a5f-a97b86fdd325, infoPort=40879, infoSecurePort=0, ipcPort=44217, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747) 2024-11-12T18:31:17,641 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data6/current/BP-1849397982-172.17.0.3-1731436275747/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:17,641 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45b1560c4f02a76c with lease ID 0xf1e429304046cd4f: from storage DS-3a15742d-831e-48a2-88df-f8ed58d5175e node DatanodeRegistration(127.0.0.1:34479, datanodeUuid=759c677e-5a6f-4018-9a5f-a97b86fdd325, infoPort=40879, infoSecurePort=0, ipcPort=44217, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:17,666 WARN [Thread-123 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:31:17,671 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50f9435fcf17c3bf with lease ID 0xf1e429304046cd51: Processing first storage report for DS-0a164ef3-8c54-4141-abef-0fc4d11872c1 from datanode DatanodeRegistration(127.0.0.1:34103, datanodeUuid=b7e105da-d8b6-4e23-8534-3067521f61cf, infoPort=40871, infoSecurePort=0, ipcPort=38841, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747) 2024-11-12T18:31:17,671 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50f9435fcf17c3bf with lease ID 0xf1e429304046cd51: from storage DS-0a164ef3-8c54-4141-abef-0fc4d11872c1 node DatanodeRegistration(127.0.0.1:34103, datanodeUuid=b7e105da-d8b6-4e23-8534-3067521f61cf, infoPort=40871, infoSecurePort=0, ipcPort=38841, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:31:17,672 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50f9435fcf17c3bf with lease ID 0xf1e429304046cd51: Processing first storage report for DS-94806f8b-75df-484b-83f9-397ec22616ef from datanode DatanodeRegistration(127.0.0.1:34103, datanodeUuid=b7e105da-d8b6-4e23-8534-3067521f61cf, infoPort=40871, infoSecurePort=0, ipcPort=38841, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747) 2024-11-12T18:31:17,672 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50f9435fcf17c3bf with lease ID 0xf1e429304046cd51: from storage DS-94806f8b-75df-484b-83f9-397ec22616ef node DatanodeRegistration(127.0.0.1:34103, datanodeUuid=b7e105da-d8b6-4e23-8534-3067521f61cf, infoPort=40871, infoSecurePort=0, ipcPort=38841, storageInfo=lv=-57;cid=testClusterID;nsid=1831279225;c=1731436275747), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:17,909 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90 2024-11-12T18:31:17,991 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-11-12T18:31:18,051 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=161, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=148, ProcessCount=11, AvailableMemoryMB=6691 2024-11-12T18:31:18,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T18:31:18,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-12T18:31:18,181 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/zookeeper_0, clientPort=61059, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T18:31:18,193 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61059 2024-11-12T18:31:18,204 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:18,206 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:18,299 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:18,300 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:18,359 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:56838 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:34103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56838 dst: /127.0.0.1:34103 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:18,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-12T18:31:18,782 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T18:31:18,792 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98 with version=8 2024-11-12T18:31:18,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/hbase-staging 2024-11-12T18:31:18,907 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-12T18:31:19,174 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:19,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,191 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:19,191 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,191 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:19,335 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T18:31:19,399 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-12T18:31:19,408 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-12T18:31:19,413 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:19,445 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 24819 (auto-detected) 2024-11-12T18:31:19,446 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-12T18:31:19,469 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39667 2024-11-12T18:31:19,494 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39667 connecting to ZooKeeper ensemble=127.0.0.1:61059 2024-11-12T18:31:19,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:396670x0, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:19,539 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39667-0x1003542fc2a0000 connected 2024-11-12T18:31:19,577 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:19,582 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:19,595 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:19,602 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98, hbase.cluster.distributed=false 2024-11-12T18:31:19,636 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:19,645 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39667 2024-11-12T18:31:19,647 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39667 2024-11-12T18:31:19,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39667 2024-11-12T18:31:19,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39667 2024-11-12T18:31:19,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39667 2024-11-12T18:31:19,777 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:19,779 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,779 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,780 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:19,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:19,783 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:31:19,786 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:19,787 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40023 2024-11-12T18:31:19,789 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40023 connecting to ZooKeeper ensemble=127.0.0.1:61059 2024-11-12T18:31:19,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:19,795 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:19,802 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:400230x0, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:19,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40023-0x1003542fc2a0001 connected 2024-11-12T18:31:19,804 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:400230x0, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:19,810 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:31:19,822 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:31:19,824 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:31:19,830 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:19,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40023 2024-11-12T18:31:19,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40023 
2024-11-12T18:31:19,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40023 2024-11-12T18:31:19,834 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40023 2024-11-12T18:31:19,834 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40023 2024-11-12T18:31:19,852 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:19,852 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,853 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:19,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:19,854 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:31:19,854 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:19,855 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35853 2024-11-12T18:31:19,857 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35853 connecting to ZooKeeper ensemble=127.0.0.1:61059 2024-11-12T18:31:19,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:19,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:19,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:358530x0, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:19,870 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35853-0x1003542fc2a0002 connected 2024-11-12T18:31:19,870 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:19,871 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-11-12T18:31:19,872 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:31:19,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:31:19,878 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:19,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35853 2024-11-12T18:31:19,882 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35853 2024-11-12T18:31:19,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35853 2024-11-12T18:31:19,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35853 2024-11-12T18:31:19,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35853 2024-11-12T18:31:19,912 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:19,913 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,913 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,913 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:19,913 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:19,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:19,914 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:31:19,914 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:19,915 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33815 2024-11-12T18:31:19,918 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33815 connecting to ZooKeeper ensemble=127.0.0.1:61059 2024-11-12T18:31:19,920 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:19,924 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:19,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338150x0, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:19,931 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:19,931 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33815-0x1003542fc2a0003 connected 2024-11-12T18:31:19,932 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:31:19,935 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:31:19,936 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:31:19,938 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:19,938 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33815 2024-11-12T18:31:19,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33815 2024-11-12T18:31:19,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33815 2024-11-12T18:31:19,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33815 2024-11-12T18:31:19,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33815 2024-11-12T18:31:19,959 DEBUG [M:0;9911683f163c:39667 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9911683f163c:39667 2024-11-12T18:31:19,960 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9911683f163c,39667,1731436278966 2024-11-12T18:31:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:19,967 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
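[editor's note] The ZKWatcher entries above show every process in the mini-cluster receiving NodeChildrenChanged / NodeCreated notifications for znodes under /hbase. Below is a minimal, self-contained sketch of that same watch-and-renew pattern using the plain Apache ZooKeeper client API. It is illustrative only: it is not HBase's ZKWatcher implementation, and the connect string and znode path are placeholders copied from the log above.

import org.apache.zookeeper.*;
import java.util.List;
import java.util.concurrent.CountDownLatch;

// Minimal sketch: register a watcher on a parent znode and report child changes,
// mirroring the NodeChildrenChanged events seen in the log. Illustrative only.
public class ZnodeChildWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // 127.0.0.1:61059 is the test ensemble from the log; substitute your own quorum.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61059", 30_000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        Watcher childWatcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                // ZooKeeper watches are one-shot: re-register after every notification.
                if (event.getType() == Event.EventType.NodeChildrenChanged) {
                    try {
                        List<String> children = zk.getChildren(event.getPath(), this);
                        System.out.println(event.getPath() + " now has children: " + children);
                    } catch (KeeperException | InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        };
        // Initial registration on the backup-masters parent znode used in the log.
        zk.getChildren("/hbase/backup-masters", childWatcher);
        Thread.sleep(60_000); // keep the session alive long enough to observe events
        zk.close();
    }
}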
2024-11-12T18:31:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:19,969 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9911683f163c,39667,1731436278966 2024-11-12T18:31:19,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:19,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:19,993 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:19,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:19,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:19,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:19,993 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:19,995 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:31:19,996 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9911683f163c,39667,1731436278966 from backup master directory 2024-11-12T18:31:19,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9911683f163c,39667,1731436278966 2024-11-12T18:31:20,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:20,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
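[editor's note] The ActiveMasterManager entries above (create the backup-master znode, set a watcher on /hbase/master, then delete the backup entry once active) follow the standard ZooKeeper ephemeral-znode election pattern. The sketch below shows that generic pattern with the plain ZooKeeper API, assuming an already-connected handle; it is not HBase's ActiveMasterManager code, just the underlying idea.

import org.apache.zookeeper.*;
import java.nio.charset.StandardCharsets;

// Generic ephemeral-znode election sketch (not HBase's ActiveMasterManager).
// Assumes 'zk' is an already-connected ZooKeeper handle.
public final class MasterElectionSketch {
    /** Returns true if this process became the active master. */
    static boolean tryBecomeActive(ZooKeeper zk, String serverName) throws Exception {
        byte[] data = serverName.getBytes(StandardCharsets.UTF_8);
        try {
            // Ephemeral: the znode disappears automatically if our session dies,
            // which is what lets a backup master take over later.
            zk.create("/hbase/master", data,
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            return true;                         // we won the election
        } catch (KeeperException.NodeExistsException e) {
            // Someone else is active; watch the znode so we get NodeDeleted and can
            // retry, mirroring the backup-master behaviour in the log.
            zk.exists("/hbase/master", event -> {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                    // retry the election here (omitted in this sketch)
                }
            });
            return false;
        }
    }
}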
2024-11-12T18:31:20,000 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:20,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:20,000 WARN [master/9911683f163c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:31:20,001 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9911683f163c,39667,1731436278966 2024-11-12T18:31:20,003 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-12T18:31:20,005 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-12T18:31:20,079 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/hbase.id] with ID: b5d8d377-8d61-4767-a2bf-2832c922f6f2 2024-11-12T18:31:20,079 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/.tmp/hbase.id 2024-11-12T18:31:20,087 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,088 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,091 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:41084 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:34479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41084 dst: /127.0.0.1:34479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:20,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-12T18:31:20,098 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T18:31:20,098 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/.tmp/hbase.id]:[hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/hbase.id] 2024-11-12T18:31:20,148 INFO [master/9911683f163c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:20,153 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T18:31:20,174 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-12T18:31:20,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,178 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,190 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,191 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,195 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:33330 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:33251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33330 dst: /127.0.0.1:33251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:20,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-12T18:31:20,201 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
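[editor's note] The FSUtils entries earlier in this stretch write hbase.id to a .tmp location and then move it into its final place, which is the usual write-then-rename publish pattern on HDFS (readers never observe a half-written file). Below is a minimal sketch of that pattern with the Hadoop FileSystem API; the paths are placeholders and the error handling is far simpler than HBase's FSUtils.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

// Sketch of the write-then-rename publish pattern used for hbase.id above.
// Paths are placeholders; real code also handles retries and permissions.
public final class WriteThenRenameSketch {
    static void publishClusterId(Configuration conf, String clusterId) throws IOException {
        FileSystem fs = FileSystem.get(conf);            // e.g. an hdfs://localhost:<port> root
        Path tmp = new Path("/hbase-rootdir/.tmp/hbase.id");
        Path dst = new Path("/hbase-rootdir/hbase.id");

        // 1. Write the content to a temporary file first, so readers never see
        //    a partially written hbase.id.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename into the final location; on HDFS this is a metadata-only operation.
        if (!fs.rename(tmp, dst)) {
            throw new IOException("Failed to move " + tmp + " to " + dst);
        }
    }
}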
2024-11-12T18:31:20,218 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:31:20,220 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T18:31:20,227 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T18:31:20,258 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,258 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,261 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:41116 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:34479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41116 dst: /127.0.0.1:34479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:20,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-12T18:31:20,269 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T18:31:20,288 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store 2024-11-12T18:31:20,308 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,308 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,311 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:41140 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41140 dst: /127.0.0.1:34479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:20,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-12T18:31:20,317 WARN [master/9911683f163c:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T18:31:20,322 INFO [master/9911683f163c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-12T18:31:20,325 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:20,326 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:31:20,327 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:20,327 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:20,328 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-12T18:31:20,328 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:20,329 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:20,330 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436280326Disabling compacts and flushes for region at 1731436280326Disabling writes for close at 1731436280328 (+2 ms)Writing region close event to WAL at 1731436280328Closed at 1731436280328 2024-11-12T18:31:20,332 WARN [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/.initializing 2024-11-12T18:31:20,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/WALs/9911683f163c,39667,1731436278966 2024-11-12T18:31:20,341 INFO [master/9911683f163c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T18:31:20,356 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C39667%2C1731436278966, suffix=, logDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/WALs/9911683f163c,39667,1731436278966, archiveDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/oldWALs, maxLogs=10 2024-11-12T18:31:20,388 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/WALs/9911683f163c,39667,1731436278966/9911683f163c%2C39667%2C1731436278966.1731436280361, exclude list is [], retry=0 2024-11-12T18:31:20,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:20,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33251,DS-81d09389-2d6a-4795-bf4a-d478ee9792e2,DISK] 2024-11-12T18:31:20,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34479,DS-439aa30b-8e3c-49da-9fc9-da8fa02eec5e,DISK] 2024-11-12T18:31:20,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34103,DS-0a164ef3-8c54-4141-abef-0fc4d11872c1,DISK] 2024-11-12T18:31:20,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-12T18:31:20,459 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/WALs/9911683f163c,39667,1731436278966/9911683f163c%2C39667%2C1731436278966.1731436280361 2024-11-12T18:31:20,460 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40871:40871),(127.0.0.1/127.0.0.1:40355:40355),(127.0.0.1/127.0.0.1:40879:40879)] 2024-11-12T18:31:20,460 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:31:20,461 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:20,464 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,465 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,506 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,533 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T18:31:20,537 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:20,540 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:20,541 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,544 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T18:31:20,544 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:20,545 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:20,545 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,548 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T18:31:20,548 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:20,549 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:20,550 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,552 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T18:31:20,552 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:20,553 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:20,554 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,558 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,559 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,565 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,566 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,569 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:31:20,573 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:20,580 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:31:20,581 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72746269, jitterRate=0.08400388062000275}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:31:20,587 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731436280478Initializing all the Stores at 1731436280481 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436280481Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436280482 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436280482Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436280482Cleaning up temporary data from old regions at 1731436280566 (+84 ms)Region opened successfully at 1731436280587 (+21 ms) 2024-11-12T18:31:20,588 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T18:31:20,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-12T18:31:20,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-12T18:31:20,629 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cb7762, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:20,666 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T18:31:20,678 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T18:31:20,678 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T18:31:20,681 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T18:31:20,683 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-12T18:31:20,688 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-12T18:31:20,689 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T18:31:20,716 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
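[editor's note] The ProcedureExecutor line above reports a small set of core workers ("bigger of cpus/4 or 16", overridden to 5 in this test) with a much larger burst ceiling of 50, and the RemoteProcedureDispatcher line reports allowCoreThreadTimeOut=true with queueMaxSize=32. As a rough JDK-only illustration of that core-plus-burst sizing idea (not HBase's actual executor, which manages its procedure worker threads itself):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// JDK-only illustration of "core workers plus burst ceiling" sizing.
// HBase's ProcedureExecutor manages its own worker threads; this is not its code.
public final class BurstWorkerPoolSketch {
    public static ThreadPoolExecutor build() {
        // "bigger of cpus/4 or 16", as worded in the log (the test overrides this to 5).
        int core = Math.max(Runtime.getRuntime().availableProcessors() / 4, 16);
        int burstMax = 50;                        // burst ceiling reported in the log
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            core, burstMax,
            60L, TimeUnit.SECONDS,                // idle burst workers retire after 60s
            new LinkedBlockingQueue<>(32),        // bounded queue: when full, grow toward burstMax
            new ThreadPoolExecutor.CallerRunsPolicy()); // back-pressure instead of rejection
        pool.allowCoreThreadTimeOut(true);        // let even core workers idle out, as in the dispatcher line
        return pool;
    }
}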
2024-11-12T18:31:20,727 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T18:31:20,729 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T18:31:20,732 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T18:31:20,733 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T18:31:20,735 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T18:31:20,738 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T18:31:20,742 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T18:31:20,744 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T18:31:20,745 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T18:31:20,747 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T18:31:20,768 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T18:31:20,770 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T18:31:20,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:20,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:20,774 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:20,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-12T18:31:20,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,775 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,778 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9911683f163c,39667,1731436278966, sessionid=0x1003542fc2a0000, setting cluster-up flag (Was=false) 2024-11-12T18:31:20,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,791 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,797 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T18:31:20,798 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,39667,1731436278966 2024-11-12T18:31:20,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,804 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:20,810 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T18:31:20,812 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,39667,1731436278966 2024-11-12T18:31:20,819 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T18:31:20,846 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(746): ClusterId : b5d8d377-8d61-4767-a2bf-2832c922f6f2 2024-11-12T18:31:20,847 INFO [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(746): ClusterId : b5d8d377-8d61-4767-a2bf-2832c922f6f2 2024-11-12T18:31:20,847 INFO [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(746): ClusterId : b5d8d377-8d61-4767-a2bf-2832c922f6f2 2024-11-12T18:31:20,849 DEBUG [RS:1;9911683f163c:35853 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:31:20,849 DEBUG [RS:2;9911683f163c:33815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:31:20,849 DEBUG [RS:0;9911683f163c:40023 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:31:20,854 DEBUG [RS:2;9911683f163c:33815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:31:20,854 DEBUG [RS:0;9911683f163c:40023 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:31:20,854 DEBUG [RS:1;9911683f163c:35853 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:31:20,854 DEBUG [RS:2;9911683f163c:33815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:31:20,854 DEBUG [RS:1;9911683f163c:35853 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:31:20,854 DEBUG [RS:0;9911683f163c:40023 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:31:20,858 DEBUG [RS:2;9911683f163c:33815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:31:20,858 DEBUG [RS:1;9911683f163c:35853 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:31:20,858 DEBUG [RS:0;9911683f163c:40023 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:31:20,858 DEBUG [RS:2;9911683f163c:33815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77aa6c90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:20,858 DEBUG [RS:1;9911683f163c:35853 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e5b209d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:20,859 DEBUG [RS:0;9911683f163c:40023 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@698f323e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:20,874 DEBUG [RS:2;9911683f163c:33815 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;9911683f163c:33815 2024-11-12T18:31:20,877 INFO [RS:2;9911683f163c:33815 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:31:20,878 INFO [RS:2;9911683f163c:33815 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:31:20,878 DEBUG [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T18:31:20,880 DEBUG [RS:0;9911683f163c:40023 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9911683f163c:40023 2024-11-12T18:31:20,880 INFO [RS:0;9911683f163c:40023 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:31:20,880 INFO [RS:0;9911683f163c:40023 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:31:20,880 DEBUG [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T18:31:20,880 INFO [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,39667,1731436278966 with port=33815, startcode=1731436279911 2024-11-12T18:31:20,880 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9911683f163c:35853 2024-11-12T18:31:20,881 INFO [RS:1;9911683f163c:35853 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:31:20,881 INFO [RS:1;9911683f163c:35853 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:31:20,881 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(832): About to register with Master. 
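The NodeChildrenChanged notifications logged earlier in this startup sequence come from ZooKeeper watchers that the master and each region server keep on the /hbase base znode (quorum 127.0.0.1:61059), which is why the same event is reported once per session. For illustration only, a minimal sketch of registering such a child watch with the plain ZooKeeper client is shown below; the quorum address and base znode are taken from the log, while the class name, session timeout, and printed message are assumptions, not code from this test.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class BaseZNodeWatcherSketch {
        public static void main(String[] args) throws Exception {
            String quorum = "127.0.0.1:61059";   // quorum as it appears in the log
            String baseZNode = "/hbase";         // base znode as it appears in the log

            // Session-level events (SyncConnected, Expired, ...) arrive at this default watcher.
            ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });

            Watcher childWatcher = new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    // Fired once per change, e.g. NodeChildrenChanged on /hbase.
                    System.out.println("Received ZooKeeper Event, type=" + event.getType()
                            + ", state=" + event.getState() + ", path=" + event.getPath());
                }
            };

            // getChildren with a watcher arms a one-shot NodeChildrenChanged watch;
            // it must be re-registered after every notification, which is why each
            // ZKWatcher session in the log reports the event separately.
            List<String> children = zk.getChildren(baseZNode, childWatcher);
            System.out.println("Children of " + baseZNode + ": " + children);
            zk.close();
        }
    }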
2024-11-12T18:31:20,882 INFO [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,39667,1731436278966 with port=40023, startcode=1731436279734 2024-11-12T18:31:20,882 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,39667,1731436278966 with port=35853, startcode=1731436279852 2024-11-12T18:31:20,895 DEBUG [RS:2;9911683f163c:33815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:31:20,895 DEBUG [RS:0;9911683f163c:40023 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:31:20,895 DEBUG [RS:1;9911683f163c:35853 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:31:20,905 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:20,917 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T18:31:20,925 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-12T18:31:20,931 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9911683f163c,39667,1731436278966 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T18:31:20,938 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52083, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:31:20,938 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53739, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:31:20,938 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41115, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:31:20,942 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:20,942 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:20,942 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:20,942 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:20,943 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9911683f163c:0, corePoolSize=10, maxPoolSize=10 2024-11-12T18:31:20,943 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:20,943 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:20,943 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:20,944 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39667 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-12T18:31:20,948 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731436310948 2024-11-12T18:31:20,950 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:20,950 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T18:31:20,951 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T18:31:20,951 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39667 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-12T18:31:20,952 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T18:31:20,952 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39667 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-12T18:31:20,956 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T18:31:20,957 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T18:31:20,957 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T18:31:20,957 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T18:31:20,958 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:20,958 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T18:31:20,958 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
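The hbase:meta table descriptor printed above (families info, ns, rep_barrier and table, all with ROW_INDEX_V1 encoding, ROWCOL bloom filters, IN_MEMORY and small block sizes) is built internally by InitMetaProcedure. As a hedged illustration of the same attributes expressed through the public client API, the sketch below builds a comparable descriptor for an ordinary user table; the table name is invented, and the builder calls are the standard org.apache.hadoop.hbase.client ones rather than anything taken from this test.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static TableDescriptor build() throws IOException {
            // Mirrors the 'info' family attributes shown in the log:
            // ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)
                    .setMaxVersions(3)
                    .build();

            return TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("example_table"))   // hypothetical table name
                    .setColumnFamily(info)
                    // Coprocessor class name as it appears in the descriptor above.
                    .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                    .build();
        }
    }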
2024-11-12T18:31:20,965 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T18:31:20,967 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T18:31:20,967 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T18:31:20,970 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T18:31:20,971 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T18:31:20,973 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,973 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,973 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436280972,5,FailOnTimeoutGroup] 2024-11-12T18:31:20,974 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436280973,5,FailOnTimeoutGroup] 2024-11-12T18:31:20,974 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:20,974 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T18:31:20,974 DEBUG [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-12T18:31:20,974 DEBUG [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-12T18:31:20,974 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-12T18:31:20,975 WARN [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-12T18:31:20,975 WARN [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-12T18:31:20,975 WARN [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-12T18:31:20,975 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
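The LogCleaner and HFileCleaner chores initialized above run pluggable delegate chains; the delegates named in the log (TimeToLiveLogCleaner, ReplicationLogCleaner, TimeToLiveHFileCleaner, HFileLinkCleaner, SnapshotHFileCleaner, ...) are supplied through configuration. A minimal sketch of wiring such a chain follows, assuming the usual hbase.master.logcleaner.plugins / hbase.master.hfilecleaner.plugins keys; the key names come from general HBase configuration knowledge, not from this test's site file.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerConfigSketch {
        public static Configuration conf() {
            Configuration conf = HBaseConfiguration.create();
            // Delegate chain for the LogCleaner chore (old WALs).
            conf.set("hbase.master.logcleaner.plugins",
                    "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
                  + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
            // Delegate chain for the HFileCleaner chore (archived store files).
            conf.set("hbase.master.hfilecleaner.plugins",
                    "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
                  + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
                  + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner");
            return conf;
        }
    }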
2024-11-12T18:31:20,976 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:20,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:41172 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:34479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41172 dst: /127.0.0.1:34479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:20,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-12T18:31:20,986 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
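The 'Cannot allocate parity block' warnings and the 'Block group <1> failed to write 2 blocks' message above come from writing under the RS-3-2-1024k erasure coding policy on a mini-cluster too small to place all five block parts (3 data + 2 parity); the DataXceiver 'Premature EOF' error appears alongside those striped writes, and the log itself points at 'hdfs ec -verifyClusterSetup' for the same check. As a hedged sketch, the effective policy on a directory can also be inspected programmatically with the Hadoop 3 DistributedFileSystem API; the NameNode address is taken from the log, the directory choice is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheckSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:46695");   // NameNode from the log

            try (FileSystem fs = FileSystem.get(conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                Path dir = new Path("/user/jenkins/test-data");   // illustrative directory
                // Returns null when the directory uses plain replication instead of EC.
                ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
                System.out.println("EC policy on " + dir + ": "
                        + (policy == null ? "none (replicated)" : policy.getName()));
            }
        }
    }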
2024-11-12T18:31:20,988 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T18:31:20,989 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98 2024-11-12T18:31:20,996 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:20,996 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:21,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:33358 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:33251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33358 dst: /127.0.0.1:33251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:21,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-12T18:31:21,011 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T18:31:21,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:21,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:31:21,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:31:21,019 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:21,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:21,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:31:21,022 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:31:21,022 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:21,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:21,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:31:21,026 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:31:21,026 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:21,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:21,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:31:21,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:31:21,030 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:21,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:21,031 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:31:21,032 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740 2024-11-12T18:31:21,033 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740 2024-11-12T18:31:21,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:31:21,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:31:21,038 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:31:21,040 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:31:21,047 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:31:21,048 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70312542, jitterRate=0.04773852229118347}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:31:21,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731436281013Initializing all the Stores at 1731436281015 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436281015Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436281016 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436281016Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436281016Cleaning up temporary data from old regions at 1731436281037 (+21 ms)Region opened successfully at 1731436281052 (+15 ms) 2024-11-12T18:31:21,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:31:21,053 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:31:21,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:31:21,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:31:21,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:31:21,055 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:31:21,055 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436281053Disabling compacts and flushes for region at 1731436281053Disabling writes for close at 1731436281053Writing region close event to WAL at 1731436281054 (+1 ms)Closed at 1731436281055 (+1 ms) 2024-11-12T18:31:21,060 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:21,060 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T18:31:21,069 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T18:31:21,076 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,39667,1731436278966 with port=35853, startcode=1731436279852 2024-11-12T18:31:21,076 INFO [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,39667,1731436278966 with port=33815, startcode=1731436279911 2024-11-12T18:31:21,076 INFO [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,39667,1731436278966 with port=40023, startcode=1731436279734 2024-11-12T18:31:21,079 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39667 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,35853,1731436279852 2024-11-12T18:31:21,080 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:31:21,081 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39667 {}] master.ServerManager(517): Registering regionserver=9911683f163c,35853,1731436279852 2024-11-12T18:31:21,083 INFO [PEWorker-2 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T18:31:21,090 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39667 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,40023,1731436279734 2024-11-12T18:31:21,090 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39667 {}] master.ServerManager(517): Registering regionserver=9911683f163c,40023,1731436279734 2024-11-12T18:31:21,091 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98 2024-11-12T18:31:21,091 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46695 2024-11-12T18:31:21,091 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:31:21,093 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39667 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,33815,1731436279911 2024-11-12T18:31:21,093 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39667 {}] master.ServerManager(517): Registering regionserver=9911683f163c,33815,1731436279911 2024-11-12T18:31:21,093 DEBUG [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98 2024-11-12T18:31:21,093 DEBUG [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46695 2024-11-12T18:31:21,094 DEBUG [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:31:21,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:31:21,096 DEBUG [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98 2024-11-12T18:31:21,096 DEBUG [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46695 2024-11-12T18:31:21,096 DEBUG [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:31:21,113 DEBUG [RS:1;9911683f163c:35853 {}] zookeeper.ZKUtil(111): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,35853,1731436279852 2024-11-12T18:31:21,113 WARN [RS:1;9911683f163c:35853 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T18:31:21,113 INFO [RS:1;9911683f163c:35853 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T18:31:21,114 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,35853,1731436279852 2024-11-12T18:31:21,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:31:21,114 DEBUG [RS:0;9911683f163c:40023 {}] zookeeper.ZKUtil(111): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,40023,1731436279734 2024-11-12T18:31:21,115 WARN [RS:0;9911683f163c:40023 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:31:21,115 INFO [RS:0;9911683f163c:40023 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T18:31:21,115 DEBUG [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,40023,1731436279734 2024-11-12T18:31:21,115 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,40023,1731436279734] 2024-11-12T18:31:21,115 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,35853,1731436279852] 2024-11-12T18:31:21,115 DEBUG [RS:2;9911683f163c:33815 {}] zookeeper.ZKUtil(111): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,33815,1731436279911 2024-11-12T18:31:21,115 WARN [RS:2;9911683f163c:33815 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T18:31:21,116 INFO [RS:2;9911683f163c:33815 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T18:31:21,116 DEBUG [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,33815,1731436279911 2024-11-12T18:31:21,116 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,33815,1731436279911] 2024-11-12T18:31:21,142 INFO [RS:2;9911683f163c:33815 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:31:21,142 INFO [RS:1;9911683f163c:35853 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:31:21,142 INFO [RS:0;9911683f163c:40023 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:31:21,157 INFO [RS:1;9911683f163c:35853 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:31:21,157 INFO [RS:2;9911683f163c:33815 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:31:21,157 INFO [RS:0;9911683f163c:40023 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:31:21,164 INFO [RS:2;9911683f163c:33815 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:31:21,164 INFO [RS:0;9911683f163c:40023 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:31:21,164 INFO [RS:1;9911683f163c:35853 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:31:21,165 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,165 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,165 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
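Each region server above instantiates its write-ahead log through WALFactory, choosing AsyncFSWALProvider. Which provider WALFactory picks is a configuration decision; a minimal sketch of selecting it explicitly is shown below, assuming the standard hbase.wal.provider key (the key name and accepted values are from general HBase documentation, not from this test).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
        public static Configuration conf() {
            Configuration conf = HBaseConfiguration.create();
            // "asyncfs" selects AsyncFSWALProvider, as instantiated in the log above;
            // "filesystem" would select the classic FSHLog-based provider instead.
            conf.set("hbase.wal.provider", "asyncfs");
            return conf;
        }
    }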
2024-11-12T18:31:21,166 INFO [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:31:21,166 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:31:21,166 INFO [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:31:21,172 INFO [RS:1;9911683f163c:35853 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:31:21,172 INFO [RS:0;9911683f163c:40023 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:31:21,172 INFO [RS:2;9911683f163c:33815 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:31:21,174 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,174 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,174 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,174 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,174 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,174 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,174 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,174 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,174 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG 
[RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:21,175 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:21,175 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:21,175 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,175 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, 
corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:21,176 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:1;9911683f163c:35853 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:21,176 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:21,176 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:21,176 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:21,176 DEBUG [RS:0;9911683f163c:40023 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:21,176 DEBUG [RS:2;9911683f163c:33815 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:21,179 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-12T18:31:21,179 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,33815,1731436279911-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,35853,1731436279852-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:31:21,179 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,40023,1731436279734-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:31:21,204 INFO [RS:2;9911683f163c:33815 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:31:21,207 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,33815,1731436279911-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,207 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,207 INFO [RS:2;9911683f163c:33815 {}] regionserver.Replication(171): 9911683f163c,33815,1731436279911 started 2024-11-12T18:31:21,208 INFO [RS:0;9911683f163c:40023 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:31:21,208 INFO [RS:1;9911683f163c:35853 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:31:21,208 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,35853,1731436279852-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
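The ChoreService lines above register periodic chores (CompactionChecker, MemstoreFlusherChore, nonceCleaner, BrokenStoreFileCleaner, MobFileCleanerChore, HeapMemoryTunerChore, ...). As a hedged illustration of that mechanism, a custom chore can be written against the public ScheduledChore/ChoreService classes roughly as below; the chore, its name, and its period are invented for the example, and the constructor signatures used are the generally documented ones and should be treated as assumptions.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        /** A trivial chore that logs a heartbeat once per period. */
        static final class HeartbeatChore extends ScheduledChore {
            HeartbeatChore(Stoppable stopper, int periodMillis) {
                super("HeartbeatChore", stopper, periodMillis);
            }

            @Override
            protected void chore() {
                System.out.println("HeartbeatChore ran at " + System.currentTimeMillis());
            }
        }

        public static void main(String[] args) throws Exception {
            // Minimal Stoppable so the chore can be cancelled cooperatively.
            final boolean[] stopped = {false};
            Stoppable stopper = new Stoppable() {
                @Override public void stop(String why) { stopped[0] = true; }
                @Override public boolean isStopped() { return stopped[0]; }
            };

            ChoreService service = new ChoreService("chore-sketch");   // thread-name prefix
            service.scheduleChore(new HeartbeatChore(stopper, 1_000)); // period in milliseconds
            Thread.sleep(3_500);
            service.shutdown();
        }
    }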
2024-11-12T18:31:21,208 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,40023,1731436279734-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,209 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,209 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,209 INFO [RS:0;9911683f163c:40023 {}] regionserver.Replication(171): 9911683f163c,40023,1731436279734 started 2024-11-12T18:31:21,209 INFO [RS:1;9911683f163c:35853 {}] regionserver.Replication(171): 9911683f163c,35853,1731436279852 started 2024-11-12T18:31:21,227 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,227 INFO [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,33815,1731436279911, RpcServer on 9911683f163c/172.17.0.3:33815, sessionid=0x1003542fc2a0003 2024-11-12T18:31:21,228 DEBUG [RS:2;9911683f163c:33815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:31:21,228 DEBUG [RS:2;9911683f163c:33815 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,33815,1731436279911 2024-11-12T18:31:21,228 DEBUG [RS:2;9911683f163c:33815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,33815,1731436279911' 2024-11-12T18:31:21,229 DEBUG [RS:2;9911683f163c:33815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:31:21,230 DEBUG [RS:2;9911683f163c:33815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:31:21,230 DEBUG [RS:2;9911683f163c:33815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:31:21,230 DEBUG [RS:2;9911683f163c:33815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:31:21,230 DEBUG [RS:2;9911683f163c:33815 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,33815,1731436279911 2024-11-12T18:31:21,231 DEBUG [RS:2;9911683f163c:33815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,33815,1731436279911' 2024-11-12T18:31:21,231 DEBUG [RS:2;9911683f163c:33815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:31:21,231 DEBUG [RS:2;9911683f163c:33815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:31:21,232 DEBUG [RS:2;9911683f163c:33815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:31:21,232 INFO [RS:2;9911683f163c:33815 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:31:21,232 INFO [RS:2;9911683f163c:33815 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-12T18:31:21,234 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,234 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,234 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,35853,1731436279852, RpcServer on 9911683f163c/172.17.0.3:35853, sessionid=0x1003542fc2a0002 2024-11-12T18:31:21,234 INFO [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,40023,1731436279734, RpcServer on 9911683f163c/172.17.0.3:40023, sessionid=0x1003542fc2a0001 2024-11-12T18:31:21,234 WARN [9911683f163c:39667 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T18:31:21,235 DEBUG [RS:0;9911683f163c:40023 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:31:21,235 DEBUG [RS:1;9911683f163c:35853 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:31:21,235 DEBUG [RS:0;9911683f163c:40023 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,40023,1731436279734 2024-11-12T18:31:21,235 DEBUG [RS:1;9911683f163c:35853 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,35853,1731436279852 2024-11-12T18:31:21,235 DEBUG [RS:0;9911683f163c:40023 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,40023,1731436279734' 2024-11-12T18:31:21,235 DEBUG [RS:1;9911683f163c:35853 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,35853,1731436279852' 2024-11-12T18:31:21,235 DEBUG [RS:0;9911683f163c:40023 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:31:21,235 DEBUG [RS:1;9911683f163c:35853 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:31:21,236 DEBUG [RS:1;9911683f163c:35853 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:31:21,236 DEBUG [RS:0;9911683f163c:40023 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:31:21,236 DEBUG [RS:0;9911683f163c:40023 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:31:21,236 DEBUG [RS:1;9911683f163c:35853 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:31:21,237 DEBUG [RS:0;9911683f163c:40023 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:31:21,237 DEBUG [RS:1;9911683f163c:35853 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:31:21,237 DEBUG [RS:1;9911683f163c:35853 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,35853,1731436279852 2024-11-12T18:31:21,237 DEBUG [RS:0;9911683f163c:40023 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,40023,1731436279734 2024-11-12T18:31:21,237 DEBUG [RS:1;9911683f163c:35853 {}] 
procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,35853,1731436279852' 2024-11-12T18:31:21,237 DEBUG [RS:0;9911683f163c:40023 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,40023,1731436279734' 2024-11-12T18:31:21,237 DEBUG [RS:1;9911683f163c:35853 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:31:21,237 DEBUG [RS:0;9911683f163c:40023 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:31:21,238 DEBUG [RS:1;9911683f163c:35853 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:31:21,238 DEBUG [RS:0;9911683f163c:40023 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:31:21,238 DEBUG [RS:1;9911683f163c:35853 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:31:21,238 INFO [RS:1;9911683f163c:35853 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:31:21,238 INFO [RS:1;9911683f163c:35853 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:31:21,238 DEBUG [RS:0;9911683f163c:40023 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:31:21,238 INFO [RS:0;9911683f163c:40023 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:31:21,238 INFO [RS:0;9911683f163c:40023 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:31:21,338 INFO [RS:2;9911683f163c:33815 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T18:31:21,339 INFO [RS:1;9911683f163c:35853 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T18:31:21,339 INFO [RS:0;9911683f163c:40023 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T18:31:21,341 INFO [RS:2;9911683f163c:33815 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C33815%2C1731436279911, suffix=, logDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,33815,1731436279911, archiveDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/oldWALs, maxLogs=32 2024-11-12T18:31:21,342 INFO [RS:0;9911683f163c:40023 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C40023%2C1731436279734, suffix=, logDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,40023,1731436279734, archiveDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/oldWALs, maxLogs=32 2024-11-12T18:31:21,342 INFO [RS:1;9911683f163c:35853 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C35853%2C1731436279852, suffix=, logDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,35853,1731436279852, archiveDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/oldWALs, maxLogs=32 2024-11-12T18:31:21,363 DEBUG [RS:1;9911683f163c:35853 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,35853,1731436279852/9911683f163c%2C35853%2C1731436279852.1731436281348, exclude list is [], retry=0 2024-11-12T18:31:21,364 DEBUG [RS:2;9911683f163c:33815 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,33815,1731436279911/9911683f163c%2C33815%2C1731436279911.1731436281344, exclude list is [], retry=0 2024-11-12T18:31:21,367 DEBUG [RS:0;9911683f163c:40023 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,40023,1731436279734/9911683f163c%2C40023%2C1731436279734.1731436281348, exclude list is [], retry=0 2024-11-12T18:31:21,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34479,DS-439aa30b-8e3c-49da-9fc9-da8fa02eec5e,DISK] 2024-11-12T18:31:21,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33251,DS-81d09389-2d6a-4795-bf4a-d478ee9792e2,DISK] 2024-11-12T18:31:21,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33251,DS-81d09389-2d6a-4795-bf4a-d478ee9792e2,DISK] 2024-11-12T18:31:21,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34103,DS-0a164ef3-8c54-4141-abef-0fc4d11872c1,DISK] 2024-11-12T18:31:21,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34103,DS-0a164ef3-8c54-4141-abef-0fc4d11872c1,DISK] 2024-11-12T18:31:21,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34479,DS-439aa30b-8e3c-49da-9fc9-da8fa02eec5e,DISK] 2024-11-12T18:31:21,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34479,DS-439aa30b-8e3c-49da-9fc9-da8fa02eec5e,DISK] 2024-11-12T18:31:21,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33251,DS-81d09389-2d6a-4795-bf4a-d478ee9792e2,DISK] 2024-11-12T18:31:21,395 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34103,DS-0a164ef3-8c54-4141-abef-0fc4d11872c1,DISK] 2024-11-12T18:31:21,400 INFO [RS:0;9911683f163c:40023 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,40023,1731436279734/9911683f163c%2C40023%2C1731436279734.1731436281348 2024-11-12T18:31:21,400 INFO [RS:2;9911683f163c:33815 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,33815,1731436279911/9911683f163c%2C33815%2C1731436279911.1731436281344 2024-11-12T18:31:21,401 INFO [RS:1;9911683f163c:35853 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,35853,1731436279852/9911683f163c%2C35853%2C1731436279852.1731436281348 2024-11-12T18:31:21,401 DEBUG [RS:0;9911683f163c:40023 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40355:40355),(127.0.0.1/127.0.0.1:40871:40871),(127.0.0.1/127.0.0.1:40879:40879)] 2024-11-12T18:31:21,401 DEBUG [RS:2;9911683f163c:33815 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40355:40355),(127.0.0.1/127.0.0.1:40871:40871),(127.0.0.1/127.0.0.1:40879:40879)] 2024-11-12T18:31:21,403 DEBUG [RS:1;9911683f163c:35853 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40879:40879),(127.0.0.1/127.0.0.1:40871:40871),(127.0.0.1/127.0.0.1:40355:40355)] 2024-11-12T18:31:21,487 DEBUG [9911683f163c:39667 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-12T18:31:21,495 DEBUG [9911683f163c:39667 {}] balancer.BalancerClusterState(204): Hosts are {9911683f163c=0} racks are {/default-rack=0} 2024-11-12T18:31:21,503 DEBUG [9911683f163c:39667 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T18:31:21,503 DEBUG [9911683f163c:39667 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T18:31:21,503 DEBUG [9911683f163c:39667 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T18:31:21,503 DEBUG [9911683f163c:39667 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T18:31:21,503 DEBUG [9911683f163c:39667 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T18:31:21,503 DEBUG [9911683f163c:39667 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T18:31:21,503 INFO [9911683f163c:39667 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T18:31:21,503 INFO [9911683f163c:39667 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T18:31:21,503 INFO [9911683f163c:39667 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T18:31:21,503 DEBUG [9911683f163c:39667 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T18:31:21,511 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9911683f163c,35853,1731436279852 2024-11-12T18:31:21,519 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,35853,1731436279852, state=OPENING 
2024-11-12T18:31:21,524 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T18:31:21,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:21,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:21,526 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:21,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:21,527 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:21,527 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:21,527 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:21,527 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:21,529 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:31:21,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,35853,1731436279852}] 2024-11-12T18:31:21,708 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:31:21,711 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60557, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:31:21,726 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T18:31:21,727 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T18:31:21,727 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-12T18:31:21,731 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C35853%2C1731436279852.meta, suffix=.meta, 
logDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,35853,1731436279852, archiveDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/oldWALs, maxLogs=32 2024-11-12T18:31:21,749 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,35853,1731436279852/9911683f163c%2C35853%2C1731436279852.meta.1731436281733.meta, exclude list is [], retry=0 2024-11-12T18:31:21,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34103,DS-0a164ef3-8c54-4141-abef-0fc4d11872c1,DISK] 2024-11-12T18:31:21,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33251,DS-81d09389-2d6a-4795-bf4a-d478ee9792e2,DISK] 2024-11-12T18:31:21,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34479,DS-439aa30b-8e3c-49da-9fc9-da8fa02eec5e,DISK] 2024-11-12T18:31:21,757 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/WALs/9911683f163c,35853,1731436279852/9911683f163c%2C35853%2C1731436279852.meta.1731436281733.meta 2024-11-12T18:31:21,758 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40871:40871),(127.0.0.1/127.0.0.1:40355:40355),(127.0.0.1/127.0.0.1:40879:40879)] 2024-11-12T18:31:21,758 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:31:21,761 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T18:31:21,763 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T18:31:21,769 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T18:31:21,774 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T18:31:21,775 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:21,775 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T18:31:21,775 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T18:31:21,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:31:21,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:31:21,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:21,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:21,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:31:21,784 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:31:21,784 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:21,784 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:21,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:31:21,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:31:21,786 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:21,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:21,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:31:21,788 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:31:21,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:21,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-12T18:31:21,790 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:31:21,791 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740 2024-11-12T18:31:21,794 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740 2024-11-12T18:31:21,797 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:31:21,797 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:31:21,798 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:31:21,800 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:31:21,802 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74705683, jitterRate=0.11320142447948456}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:31:21,802 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T18:31:21,804 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731436281776Writing region info on filesystem at 1731436281776Initializing all the Stores at 1731436281778 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436281778Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436281779 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436281779Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436281779Cleaning up temporary data from old regions at 1731436281797 (+18 ms)Running coprocessor post-open hooks at 1731436281802 (+5 ms)Region opened successfully at 1731436281804 (+2 ms) 2024-11-12T18:31:21,841 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731436281698 2024-11-12T18:31:21,857 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T18:31:21,858 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T18:31:21,863 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,35853,1731436279852 2024-11-12T18:31:21,866 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,35853,1731436279852, state=OPEN 2024-11-12T18:31:21,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:21,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:21,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:21,870 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:21,870 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:21,870 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:21,869 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:21,870 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:21,870 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=9911683f163c,35853,1731436279852 2024-11-12T18:31:21,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T18:31:21,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,35853,1731436279852 in 339 msec 2024-11-12T18:31:21,885 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T18:31:21,885 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 811 msec 2024-11-12T18:31:21,886 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:21,886 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T18:31:21,908 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:31:21,909 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,35853,1731436279852, seqNum=-1] 2024-11-12T18:31:21,931 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:31:21,934 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52551, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:31:21,955 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1040 sec 2024-11-12T18:31:21,955 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731436281955, completionTime=-1 2024-11-12T18:31:21,958 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-12T18:31:21,958 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-12T18:31:21,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-12T18:31:21,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731436341985 2024-11-12T18:31:21,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731436401985 2024-11-12T18:31:21,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 27 msec 2024-11-12T18:31:21,987 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-12T18:31:21,994 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,39667,1731436278966-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,994 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,39667,1731436278966-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,994 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,39667,1731436278966-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,996 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9911683f163c:39667, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,996 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:21,996 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:22,004 DEBUG [master/9911683f163c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T18:31:22,027 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.026sec 2024-11-12T18:31:22,028 INFO [master/9911683f163c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T18:31:22,030 INFO [master/9911683f163c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T18:31:22,031 INFO [master/9911683f163c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T18:31:22,031 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-12T18:31:22,031 INFO [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T18:31:22,032 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,39667,1731436278966-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:31:22,033 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,39667,1731436278966-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T18:31:22,037 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T18:31:22,038 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T18:31:22,039 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,39667,1731436278966-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:22,057 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4823d15, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:31:22,062 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-12T18:31:22,063 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-12T18:31:22,067 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9911683f163c,39667,-1 for getting cluster id 2024-11-12T18:31:22,070 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T18:31:22,080 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b5d8d377-8d61-4767-a2bf-2832c922f6f2' 2024-11-12T18:31:22,082 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T18:31:22,082 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b5d8d377-8d61-4767-a2bf-2832c922f6f2" 2024-11-12T18:31:22,083 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5133117b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:31:22,083 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9911683f163c,39667,-1] 2024-11-12T18:31:22,086 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T18:31:22,087 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:22,088 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59218, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-11-12T18:31:22,091 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@402fd37f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:31:22,092 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:31:22,102 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,35853,1731436279852, seqNum=-1] 2024-11-12T18:31:22,103 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:31:22,106 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37738, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:31:22,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9911683f163c,39667,1731436278966 2024-11-12T18:31:22,133 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T18:31:22,138 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 9911683f163c,39667,1731436278966 2024-11-12T18:31:22,141 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7661c0ce 2024-11-12T18:31:22,142 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T18:31:22,145 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59232, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T18:31:22,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:31:22,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-12T18:31:22,163 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T18:31:22,165 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-12T18:31:22,166 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:22,168 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T18:31:22,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:22,178 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:22,178 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:22,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:56922 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:34103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56922 dst: /127.0.0.1:34103
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T18:31:22,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-12T18:31:22,192 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-12T18:31:22,195 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8f19f053379eeda9a8b6b19fee42d7cc, NAME => 'TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98 2024-11-12T18:31:22,202 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:22,202 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:22,208 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:56936 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:34103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56936 dst: /127.0.0.1:34103
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-12T18:31:22,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-12T18:31:22,216 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-12T18:31:22,217 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:22,217 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 8f19f053379eeda9a8b6b19fee42d7cc, disabling compactions & flushes 2024-11-12T18:31:22,217 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:22,217 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:22,217 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. after waiting 0 ms 2024-11-12T18:31:22,217 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:22,217 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:22,217 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8f19f053379eeda9a8b6b19fee42d7cc: Waiting for close lock at 1731436282217Disabling compacts and flushes for region at 1731436282217Disabling writes for close at 1731436282217Writing region close event to WAL at 1731436282217Closed at 1731436282217 2024-11-12T18:31:22,220 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T18:31:22,225 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731436282220"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731436282220"}]},"ts":"1731436282220"} 2024-11-12T18:31:22,232 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-12T18:31:22,235 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T18:31:22,238 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436282235"}]},"ts":"1731436282235"} 2024-11-12T18:31:22,245 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-12T18:31:22,245 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {9911683f163c=0} racks are {/default-rack=0} 2024-11-12T18:31:22,247 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T18:31:22,247 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T18:31:22,247 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T18:31:22,247 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T18:31:22,247 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T18:31:22,247 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T18:31:22,247 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T18:31:22,247 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T18:31:22,247 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T18:31:22,247 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T18:31:22,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8f19f053379eeda9a8b6b19fee42d7cc, ASSIGN}] 2024-11-12T18:31:22,252 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8f19f053379eeda9a8b6b19fee42d7cc, ASSIGN 2024-11-12T18:31:22,255 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8f19f053379eeda9a8b6b19fee42d7cc, ASSIGN; state=OFFLINE, location=9911683f163c,35853,1731436279852; forceNewPlan=false, retain=false 2024-11-12T18:31:22,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:22,408 INFO [9911683f163c:39667 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-12T18:31:22,409 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8f19f053379eeda9a8b6b19fee42d7cc, regionState=OPENING, regionLocation=9911683f163c,35853,1731436279852 2024-11-12T18:31:22,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8f19f053379eeda9a8b6b19fee42d7cc, ASSIGN because future has completed 2024-11-12T18:31:22,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8f19f053379eeda9a8b6b19fee42d7cc, server=9911683f163c,35853,1731436279852}] 2024-11-12T18:31:22,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:22,578 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:22,578 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8f19f053379eeda9a8b6b19fee42d7cc, NAME => 'TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:31:22,578 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,579 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:22,579 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,579 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,581 INFO [StoreOpener-8f19f053379eeda9a8b6b19fee42d7cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,583 INFO [StoreOpener-8f19f053379eeda9a8b6b19fee42d7cc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f19f053379eeda9a8b6b19fee42d7cc columnFamilyName cf 2024-11-12T18:31:22,583 DEBUG [StoreOpener-8f19f053379eeda9a8b6b19fee42d7cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:22,585 INFO [StoreOpener-8f19f053379eeda9a8b6b19fee42d7cc-1 {}] regionserver.HStore(327): Store=8f19f053379eeda9a8b6b19fee42d7cc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:22,585 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,586 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,587 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,588 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,588 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,591 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,599 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/8f19f053379eeda9a8b6b19fee42d7cc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:31:22,600 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8f19f053379eeda9a8b6b19fee42d7cc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71779521, jitterRate=0.06959821283817291}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T18:31:22,600 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:22,601 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8f19f053379eeda9a8b6b19fee42d7cc: Running coprocessor pre-open hook at 1731436282579Writing region info on filesystem at 1731436282579Initializing all the Stores at 1731436282581 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436282581Cleaning up temporary data from old regions at 1731436282588 (+7 ms)Running coprocessor post-open hooks at 1731436282600 (+12 ms)Region opened successfully at 1731436282601 (+1 ms) 2024-11-12T18:31:22,604 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc., pid=6, masterSystemTime=1731436282570 2024-11-12T18:31:22,607 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:22,608 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:22,609 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8f19f053379eeda9a8b6b19fee42d7cc, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,35853,1731436279852 2024-11-12T18:31:22,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8f19f053379eeda9a8b6b19fee42d7cc, server=9911683f163c,35853,1731436279852 because future has completed 2024-11-12T18:31:22,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T18:31:22,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8f19f053379eeda9a8b6b19fee42d7cc, server=9911683f163c,35853,1731436279852 in 201 msec 2024-11-12T18:31:22,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T18:31:22,623 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8f19f053379eeda9a8b6b19fee42d7cc, ASSIGN in 370 msec 2024-11-12T18:31:22,624 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T18:31:22,625 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436282624"}]},"ts":"1731436282624"} 2024-11-12T18:31:22,628 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-12T18:31:22,630 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T18:31:22,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 476 msec 
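The CreateTableProcedure above (pid=4) builds 'TestHBaseWalOnEC' with a single family 'cf', VERSIONS => '1', no bloom filter, no compression, and 64 KB blocks. The test class itself is not shown in the log, so the following is only a hedged sketch of how such a table could be created with the public HBase client API; the store-file-tracker attribute seen in the descriptor is typically filled in from cluster configuration rather than set explicitly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Mirrors the descriptor printed by HRegion(7572): one 'cf' family,
          // one version, no bloom filter, 64 KB block size, single region replica.
          TableDescriptorBuilder td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.NONE)
                  .setBlocksize(64 * 1024)
                  .build());
          admin.createTable(td.build());
        }
      }
    }

The 476 ms reported for the whole procedure covers writing the region to hbase:meta, assigning it (pid=5/6), and marking the table ENABLED.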
2024-11-12T18:31:22,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:22,798 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T18:31:22,799 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-12T18:31:22,800 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T18:31:22,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-12T18:31:22,806 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T18:31:22,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-12T18:31:22,816 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc., hostname=9911683f163c,35853,1731436279852, seqNum=2] 2024-11-12T18:31:22,827 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-12T18:31:22,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-12T18:31:22,836 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-12T18:31:22,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T18:31:22,838 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T18:31:22,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T18:31:22,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T18:31:23,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35853 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-12T18:31:23,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 
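The entries just above show the client locating the region for row 'row' and the master accepting a flush of TestHBaseWalOnEC (FlushTableProcedure, pid=7); the flush that follows writes a single cell keyed row/cf:cq. A hedged sketch of the corresponding client-side calls is below; the cell value is not visible in the log, so "value" here is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Matches the cell seen in the flush: row 'row', family 'cf', qualifier 'cq'.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Triggers the FlushTableProcedure (pid=7) and its per-region subprocedure (pid=8).
          admin.flush(tn);
        }
      }
    }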
2024-11-12T18:31:23,007 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 8f19f053379eeda9a8b6b19fee42d7cc 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-12T18:31:23,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/8f19f053379eeda9a8b6b19fee42d7cc/.tmp/cf/a718ac7bf41741ffbbba77e1bcabebe9 is 36, key is row/cf:cq/1731436282819/Put/seqid=0 2024-11-12T18:31:23,074 WARN [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:23,074 WARN [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:23,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_209909197_22 at /127.0.0.1:33432 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33432 dst: /127.0.0.1:33251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:23,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-12T18:31:23,083 WARN [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T18:31:23,084 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/8f19f053379eeda9a8b6b19fee42d7cc/.tmp/cf/a718ac7bf41741ffbbba77e1bcabebe9 2024-11-12T18:31:23,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/8f19f053379eeda9a8b6b19fee42d7cc/.tmp/cf/a718ac7bf41741ffbbba77e1bcabebe9 as hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/8f19f053379eeda9a8b6b19fee42d7cc/cf/a718ac7bf41741ffbbba77e1bcabebe9 2024-11-12T18:31:23,145 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/8f19f053379eeda9a8b6b19fee42d7cc/cf/a718ac7bf41741ffbbba77e1bcabebe9, entries=1, sequenceid=5, filesize=4.7 K 2024-11-12T18:31:23,152 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 8f19f053379eeda9a8b6b19fee42d7cc in 145ms, sequenceid=5, compaction requested=false 2024-11-12T18:31:23,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-12T18:31:23,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 8f19f053379eeda9a8b6b19fee42d7cc: 2024-11-12T18:31:23,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 
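The flush above writes the memstore to a temporary HFile under .tmp/cf and then commits it into the column family directory (entries=1, sequenceid=5, about 4.7 KB). If one wanted to confirm the committed store file from the test side, a plain FileSystem listing of that directory would do; the sketch below is illustrative only, and the base path is copied from this particular run, so it would differ every time.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Path layout from the log: <root>/data/<namespace>/<table>/<encoded region>/<family>/
        Path cfDir = new Path("hdfs://localhost:46695/user/jenkins/test-data/"
            + "75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/"
            + "8f19f053379eeda9a8b6b19fee42d7cc/cf");
        try (FileSystem fs = cfDir.getFileSystem(conf)) {
          for (FileStatus f : fs.listStatus(cfDir)) {
            // Expect one HFile of roughly 4.7 KB after the flush above.
            System.out.println(f.getPath().getName() + " " + f.getLen() + " bytes");
          }
        }
      }
    }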
2024-11-12T18:31:23,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T18:31:23,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-12T18:31:23,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-12T18:31:23,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T18:31:23,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 323 msec 2024-11-12T18:31:23,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 338 msec 2024-11-12T18:31:23,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39667 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T18:31:23,468 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T18:31:23,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T18:31:23,484 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T18:31:23,484 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:23,489 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:23,490 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:23,490 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-12T18:31:23,490 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T18:31:23,490 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2134490824, stopped=false 2024-11-12T18:31:23,490 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9911683f163c,39667,1731436278966 2024-11-12T18:31:23,493 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:23,493 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:23,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:23,493 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:31:23,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:23,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:23,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:23,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:23,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:23,493 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T18:31:23,493 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:23,494 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:23,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:23,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:23,494 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,40023,1731436279734' ***** 2024-11-12T18:31:23,494 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:31:23,494 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,35853,1731436279852' ***** 2024-11-12T18:31:23,494 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:31:23,494 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,33815,1731436279911' ***** 2024-11-12T18:31:23,494 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:31:23,495 INFO [RS:2;9911683f163c:33815 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:31:23,495 INFO [RS:1;9911683f163c:35853 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:31:23,495 INFO [RS:0;9911683f163c:40023 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:31:23,495 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:23,495 INFO [RS:1;9911683f163c:35853 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:31:23,495 INFO [RS:0;9911683f163c:40023 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:31:23,495 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:23,495 INFO [RS:1;9911683f163c:35853 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:31:23,495 INFO [RS:0;9911683f163c:40023 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
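The call stacks above show tearDown() in TestHBaseWalOnEC invoking HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection, asks the master to shut down, and then stops each region server, producing the STOPPING messages that follow. The log does not show the test class itself, so the following is only a sketch of a teardown shape consistent with that stack trace; the UTIL field name and the exact JUnit annotation are assumptions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;

    public class TestHBaseWalOnECTeardownSketch {
      // Shared testing utility the test class would hold; the name is assumed.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @AfterClass
      public static void tearDown() throws Exception {
        // Produces the sequence seen here: close the cluster connection, request master
        // shutdown, stop the region servers, then tear down HDFS and ZooKeeper.
        UTIL.shutdownMiniCluster();
      }
    }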
2024-11-12T18:31:23,495 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:31:23,496 INFO [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,40023,1731436279734 2024-11-12T18:31:23,496 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(3091): Received CLOSE for 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:23,496 INFO [RS:0;9911683f163c:40023 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:23,496 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:31:23,496 INFO [RS:2;9911683f163c:33815 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:31:23,496 INFO [RS:0;9911683f163c:40023 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9911683f163c:40023. 2024-11-12T18:31:23,496 INFO [RS:2;9911683f163c:33815 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:31:23,496 DEBUG [RS:0;9911683f163c:40023 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:23,496 INFO [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,33815,1731436279911 2024-11-12T18:31:23,496 DEBUG [RS:0;9911683f163c:40023 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:23,496 INFO [RS:2;9911683f163c:33815 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:23,496 INFO [RS:2;9911683f163c:33815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;9911683f163c:33815. 
2024-11-12T18:31:23,496 DEBUG [RS:2;9911683f163c:33815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:23,496 INFO [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,40023,1731436279734; all regions closed. 2024-11-12T18:31:23,496 DEBUG [RS:2;9911683f163c:33815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:23,496 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,35853,1731436279852 2024-11-12T18:31:23,496 INFO [RS:1;9911683f163c:35853 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:23,496 INFO [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,33815,1731436279911; all regions closed. 2024-11-12T18:31:23,496 INFO [RS:1;9911683f163c:35853 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;9911683f163c:35853. 
2024-11-12T18:31:23,496 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:31:23,497 DEBUG [RS:1;9911683f163c:35853 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:23,497 DEBUG [RS:1;9911683f163c:35853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:23,497 INFO [RS:1;9911683f163c:35853 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:31:23,497 INFO [RS:1;9911683f163c:35853 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:31:23,497 INFO [RS:1;9911683f163c:35853 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T18:31:23,497 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T18:31:23,497 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8f19f053379eeda9a8b6b19fee42d7cc, disabling compactions & flushes 2024-11-12T18:31:23,498 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:23,498 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-12T18:31:23,498 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 
2024-11-12T18:31:23,498 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8f19f053379eeda9a8b6b19fee42d7cc=TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc.} 2024-11-12T18:31:23,498 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:31:23,498 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. after waiting 0 ms 2024-11-12T18:31:23,498 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:23,498 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:31:23,498 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:31:23,498 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:31:23,498 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:31:23,498 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8f19f053379eeda9a8b6b19fee42d7cc 2024-11-12T18:31:23,498 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-12T18:31:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_1073741827_1017 (size=93) 2024-11-12T18:31:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_1073741827_1017 (size=93) 2024-11-12T18:31:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_1073741828_1018 (size=93) 2024-11-12T18:31:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741828_1018 (size=93) 2024-11-12T18:31:23,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_1073741828_1018 (size=93) 2024-11-12T18:31:23,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741827_1017 (size=93) 2024-11-12T18:31:23,517 DEBUG [RS:2;9911683f163c:33815 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/oldWALs 2024-11-12T18:31:23,517 INFO [RS:2;9911683f163c:33815 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9911683f163c%2C33815%2C1731436279911:(num 1731436281344) 2024-11-12T18:31:23,518 DEBUG [RS:2;9911683f163c:33815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:23,518 INFO 
[RS:2;9911683f163c:33815 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:23,518 DEBUG [RS:0;9911683f163c:40023 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/oldWALs 2024-11-12T18:31:23,518 INFO [RS:2;9911683f163c:33815 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:31:23,518 INFO [RS:0;9911683f163c:40023 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9911683f163c%2C40023%2C1731436279734:(num 1731436281348) 2024-11-12T18:31:23,518 DEBUG [RS:0;9911683f163c:40023 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:23,518 INFO [RS:0;9911683f163c:40023 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:23,518 INFO [RS:0;9911683f163c:40023 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:31:23,518 INFO [RS:2;9911683f163c:33815 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T18:31:23,518 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:31:23,518 INFO [RS:0;9911683f163c:40023 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T18:31:23,518 INFO [RS:2;9911683f163c:33815 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:31:23,518 INFO [RS:2;9911683f163c:33815 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:31:23,518 INFO [RS:2;9911683f163c:33815 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T18:31:23,518 INFO [RS:2;9911683f163c:33815 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:31:23,518 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:31:23,519 INFO [RS:0;9911683f163c:40023 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:31:23,519 INFO [RS:0;9911683f163c:40023 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:31:23,519 INFO [RS:0;9911683f163c:40023 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T18:31:23,519 INFO [RS:0;9911683f163c:40023 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:31:23,519 INFO [RS:2;9911683f163c:33815 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33815 2024-11-12T18:31:23,519 INFO [RS:0;9911683f163c:40023 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40023 2024-11-12T18:31:23,523 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,40023,1731436279734 2024-11-12T18:31:23,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:31:23,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,33815,1731436279911 2024-11-12T18:31:23,524 INFO [RS:0;9911683f163c:40023 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:31:23,524 INFO [RS:2;9911683f163c:33815 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:31:23,525 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,40023,1731436279734] 2024-11-12T18:31:23,527 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,40023,1731436279734 already deleted, retry=false 2024-11-12T18:31:23,527 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,40023,1731436279734 expired; onlineServers=2 2024-11-12T18:31:23,527 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,33815,1731436279911] 2024-11-12T18:31:23,528 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,33815,1731436279911 already deleted, retry=false 2024-11-12T18:31:23,529 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,33815,1731436279911 expired; onlineServers=1 2024-11-12T18:31:23,542 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/.tmp/info/2cba200e6a57413ebb02116094be7cf0 is 153, key is TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc./info:regioninfo/1731436282608/Put/seqid=0 2024-11-12T18:31:23,547 WARN [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:23,547 WARN [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:23,555 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/default/TestHBaseWalOnEC/8f19f053379eeda9a8b6b19fee42d7cc/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-12T18:31:23,561 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 2024-11-12T18:31:23,561 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8f19f053379eeda9a8b6b19fee42d7cc: Waiting for close lock at 1731436283497Running coprocessor pre-close hooks at 1731436283497Disabling compacts and flushes for region at 1731436283497Disabling writes for close at 1731436283498 (+1 ms)Writing region close event to WAL at 1731436283501 (+3 ms)Running coprocessor post-close hooks at 1731436283557 (+56 ms)Closed at 1731436283561 (+4 ms) 2024-11-12T18:31:23,562 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_209909197_22 at /127.0.0.1:33452 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33452 dst: /127.0.0.1:33251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:23,562 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731436282146.8f19f053379eeda9a8b6b19fee42d7cc. 
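A note on the recurring warnings above: the writers in this test use the RS-3-2-1024k erasure coding policy, which stripes each block group into 3 data blocks plus 2 parity blocks and therefore needs at least 5 datanodes. This minicluster runs only three datanodes (three block-pool services are shut down further below), so the parity blocks at indexes 3 and 4 cannot be placed, the client flags each block group as "at high risk of losing data", and the DataXceiver "Premature EOF from inputStream" errors that follow each flush appear to be a side effect of the striped writer closing streamers it could not fully use. Besides the 'hdfs ec -verifyClusterSetup' command the warning itself suggests, the policy on a directory can also be inspected programmatically. The sketch below is illustrative only; the NameNode address and path are placeholders loosely based on this log, not part of the test.

    // Illustrative sketch: inspect the erasure coding policy on a directory with
    // the Hadoop 3.x DistributedFileSystem API. Address and path are placeholders.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:46695"); // placeholder NameNode address
        try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
          Path dir = new Path("/user/jenkins/test-data"); // placeholder directory
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println("No erasure coding policy set; directory uses replication.");
          } else {
            // RS-3-2-1024k reports 3 data units and 2 parity units, i.e. 5 datanodes needed.
            System.out.printf("policy=%s dataUnits=%d parityUnits=%d%n",
                policy.getName(), policy.getNumDataUnits(), policy.getNumParityUnits());
          }
        }
      }
    }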
2024-11-12T18:31:23,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-12T18:31:23,582 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:23,586 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:23,586 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:23,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-12T18:31:23,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-12T18:31:23,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775693_1015 (size=32) 2024-11-12T18:31:23,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775692_1015 (size=32) 2024-11-12T18:31:23,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-12T18:31:23,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-12T18:31:23,627 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:23,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:23,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33815-0x1003542fc2a0003, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:23,627 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40023-0x1003542fc2a0001, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:23,627 INFO [RS:0;9911683f163c:40023 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:31:23,627 INFO [RS:2;9911683f163c:33815 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:31:23,628 INFO [RS:0;9911683f163c:40023 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,40023,1731436279734; zookeeper connection closed. 2024-11-12T18:31:23,628 INFO [RS:2;9911683f163c:33815 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,33815,1731436279911; zookeeper connection closed. 
2024-11-12T18:31:23,628 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1dedfb4f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1dedfb4f 2024-11-12T18:31:23,628 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@50d9f7d3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@50d9f7d3 2024-11-12T18:31:23,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-12T18:31:23,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-12T18:31:23,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-12T18:31:23,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-12T18:31:23,698 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T18:31:23,899 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T18:31:23,968 WARN [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T18:31:23,969 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/.tmp/info/2cba200e6a57413ebb02116094be7cf0 2024-11-12T18:31:24,002 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/.tmp/ns/c13cd7d83c4e4464886ef5e2bbe81b21 is 43, key is default/ns:d/1731436281938/Put/seqid=0 2024-11-12T18:31:24,004 WARN [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,005 WARN [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T18:31:24,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_209909197_22 at /127.0.0.1:57004 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:34103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57004 dst: /127.0.0.1:34103 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:24,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-12T18:31:24,016 WARN [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T18:31:24,017 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/.tmp/ns/c13cd7d83c4e4464886ef5e2bbe81b21 2024-11-12T18:31:24,044 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/.tmp/table/649c2acc2b2a491cbb222dffa8e70fa5 is 52, key is TestHBaseWalOnEC/table:state/1731436282624/Put/seqid=0 2024-11-12T18:31:24,047 WARN [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,047 WARN [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,050 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_209909197_22 at /127.0.0.1:41294 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:34479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41294 dst: /127.0.0.1:34479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:24,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-12T18:31:24,055 WARN [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T18:31:24,056 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/.tmp/table/649c2acc2b2a491cbb222dffa8e70fa5 2024-11-12T18:31:24,066 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/.tmp/info/2cba200e6a57413ebb02116094be7cf0 as hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/info/2cba200e6a57413ebb02116094be7cf0 2024-11-12T18:31:24,075 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/info/2cba200e6a57413ebb02116094be7cf0, entries=10, sequenceid=11, filesize=6.5 K 2024-11-12T18:31:24,077 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/.tmp/ns/c13cd7d83c4e4464886ef5e2bbe81b21 as hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/ns/c13cd7d83c4e4464886ef5e2bbe81b21 2024-11-12T18:31:24,086 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/ns/c13cd7d83c4e4464886ef5e2bbe81b21, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T18:31:24,088 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/.tmp/table/649c2acc2b2a491cbb222dffa8e70fa5 as hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/table/649c2acc2b2a491cbb222dffa8e70fa5 2024-11-12T18:31:24,098 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/table/649c2acc2b2a491cbb222dffa8e70fa5, entries=2, sequenceid=11, filesize=5.1 K 2024-11-12T18:31:24,099 DEBUG [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T18:31:24,100 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 601ms, sequenceid=11, compaction requested=false 2024-11-12T18:31:24,100 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T18:31:24,108 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T18:31:24,109 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:31:24,110 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:31:24,110 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436283498Running coprocessor pre-close hooks at 1731436283498Disabling compacts and flushes for region at 1731436283498Disabling writes for close at 1731436283498Obtaining lock to block concurrent updates at 1731436283499 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731436283499Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731436283500 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731436283501 (+1 ms)Flushing 1588230740/info: creating writer at 1731436283501Flushing 1588230740/info: appending metadata at 1731436283537 (+36 ms)Flushing 1588230740/info: closing flushed file at 1731436283538 (+1 ms)Flushing 1588230740/ns: creating writer at 1731436283980 (+442 ms)Flushing 1588230740/ns: appending metadata at 1731436284001 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1731436284001Flushing 1588230740/table: creating writer at 1731436284026 (+25 ms)Flushing 1588230740/table: appending metadata at 1731436284043 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731436284043Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7be15a8e: reopening flushed file at 1731436284065 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e353326: reopening flushed file at 1731436284076 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b7ef937: reopening flushed file at 1731436284087 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 601ms, sequenceid=11, compaction requested=false at 1731436284100 (+13 ms)Writing region close event to WAL at 1731436284102 (+2 ms)Running coprocessor post-close hooks at 1731436284109 (+7 ms)Closed at 1731436284110 (+1 ms) 2024-11-12T18:31:24,110 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T18:31:24,245 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-12T18:31:24,246 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-12T18:31:24,299 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,35853,1731436279852; all regions closed. 
2024-11-12T18:31:24,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_1073741829_1019 (size=2751) 2024-11-12T18:31:24,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_1073741829_1019 (size=2751) 2024-11-12T18:31:24,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741829_1019 (size=2751) 2024-11-12T18:31:24,307 DEBUG [RS:1;9911683f163c:35853 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/oldWALs 2024-11-12T18:31:24,307 INFO [RS:1;9911683f163c:35853 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9911683f163c%2C35853%2C1731436279852.meta:.meta(num 1731436281733) 2024-11-12T18:31:24,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741826_1016 (size=1298) 2024-11-12T18:31:24,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_1073741826_1016 (size=1298) 2024-11-12T18:31:24,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_1073741826_1016 (size=1298) 2024-11-12T18:31:24,314 DEBUG [RS:1;9911683f163c:35853 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/oldWALs 2024-11-12T18:31:24,314 INFO [RS:1;9911683f163c:35853 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9911683f163c%2C35853%2C1731436279852:(num 1731436281348) 2024-11-12T18:31:24,314 DEBUG [RS:1;9911683f163c:35853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:24,314 INFO [RS:1;9911683f163c:35853 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:24,314 INFO [RS:1;9911683f163c:35853 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:31:24,314 INFO [RS:1;9911683f163c:35853 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T18:31:24,314 INFO [RS:1;9911683f163c:35853 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:31:24,314 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T18:31:24,314 INFO [RS:1;9911683f163c:35853 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35853 2024-11-12T18:31:24,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,35853,1731436279852 2024-11-12T18:31:24,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:31:24,317 INFO [RS:1;9911683f163c:35853 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:31:24,318 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,35853,1731436279852] 2024-11-12T18:31:24,321 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,35853,1731436279852 already deleted, retry=false 2024-11-12T18:31:24,321 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,35853,1731436279852 expired; onlineServers=0 2024-11-12T18:31:24,321 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9911683f163c,39667,1731436278966' ***** 2024-11-12T18:31:24,321 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T18:31:24,321 INFO [M:0;9911683f163c:39667 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:24,321 INFO [M:0;9911683f163c:39667 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:31:24,321 DEBUG [M:0;9911683f163c:39667 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T18:31:24,322 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T18:31:24,322 DEBUG [M:0;9911683f163c:39667 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T18:31:24,322 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436280972 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436280972,5,FailOnTimeoutGroup] 2024-11-12T18:31:24,322 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436280973 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436280973,5,FailOnTimeoutGroup] 2024-11-12T18:31:24,322 INFO [M:0;9911683f163c:39667 {}] hbase.ChoreService(370): Chore service for: master/9911683f163c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T18:31:24,322 INFO [M:0;9911683f163c:39667 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:31:24,322 DEBUG [M:0;9911683f163c:39667 {}] master.HMaster(1795): Stopping service threads 2024-11-12T18:31:24,322 INFO [M:0;9911683f163c:39667 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T18:31:24,322 INFO [M:0;9911683f163c:39667 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:31:24,323 INFO [M:0;9911683f163c:39667 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T18:31:24,323 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T18:31:24,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:24,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:24,324 DEBUG [M:0;9911683f163c:39667 {}] zookeeper.ZKUtil(347): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T18:31:24,324 WARN [M:0;9911683f163c:39667 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T18:31:24,325 INFO [M:0;9911683f163c:39667 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/.lastflushedseqids 2024-11-12T18:31:24,335 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,335 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T18:31:24,337 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:57030 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:34103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57030 dst: /127.0.0.1:34103 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:24,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-12T18:31:24,342 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T18:31:24,342 INFO [M:0;9911683f163c:39667 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T18:31:24,342 INFO [M:0;9911683f163c:39667 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T18:31:24,343 DEBUG [M:0;9911683f163c:39667 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:31:24,343 INFO [M:0;9911683f163c:39667 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:24,343 DEBUG [M:0;9911683f163c:39667 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:24,343 DEBUG [M:0;9911683f163c:39667 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:31:24,343 DEBUG [M:0;9911683f163c:39667 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:31:24,343 INFO [M:0;9911683f163c:39667 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-11-12T18:31:24,363 DEBUG [M:0;9911683f163c:39667 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c23b3245c7354835832fb48db1724ca1 is 82, key is hbase:meta,,1/info:regioninfo/1731436281863/Put/seqid=0 2024-11-12T18:31:24,366 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,366 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:41322 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:34479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41322 dst: /127.0.0.1:34479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:24,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-12T18:31:24,374 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T18:31:24,374 INFO [M:0;9911683f163c:39667 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c23b3245c7354835832fb48db1724ca1 2024-11-12T18:31:24,401 DEBUG [M:0;9911683f163c:39667 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cbab9d00e9284e00bb8c2368ee7058a6 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731436282632/Put/seqid=0 2024-11-12T18:31:24,404 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,404 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,407 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:41332 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:34479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41332 dst: /127.0.0.1:34479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:24,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_-9223372036854775552_1037 (size=6438) 2024-11-12T18:31:24,412 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T18:31:24,412 INFO [M:0;9911683f163c:39667 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cbab9d00e9284e00bb8c2368ee7058a6 2024-11-12T18:31:24,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:24,419 INFO [RS:1;9911683f163c:35853 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:31:24,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35853-0x1003542fc2a0002, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:24,419 INFO [RS:1;9911683f163c:35853 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,35853,1731436279852; zookeeper connection closed. 2024-11-12T18:31:24,419 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e1de2d9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e1de2d9 2024-11-12T18:31:24,420 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-12T18:31:24,445 DEBUG [M:0;9911683f163c:39667 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/68f47ec9914d44a489c65cba7bfd0bed is 69, key is 9911683f163c,33815,1731436279911/rs:state/1731436281093/Put/seqid=0 2024-11-12T18:31:24,447 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,447 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T18:31:24,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1344204572_22 at /127.0.0.1:57044 [Receiving block BP-1849397982-172.17.0.3-1731436275747:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:34103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57044 dst: /127.0.0.1:34103 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:24,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-12T18:31:24,455 WARN [M:0;9911683f163c:39667 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T18:31:24,455 INFO [M:0;9911683f163c:39667 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/68f47ec9914d44a489c65cba7bfd0bed 2024-11-12T18:31:24,465 DEBUG [M:0;9911683f163c:39667 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c23b3245c7354835832fb48db1724ca1 as hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c23b3245c7354835832fb48db1724ca1 2024-11-12T18:31:24,473 INFO [M:0;9911683f163c:39667 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c23b3245c7354835832fb48db1724ca1, entries=8, sequenceid=72, filesize=5.5 K 2024-11-12T18:31:24,475 DEBUG [M:0;9911683f163c:39667 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cbab9d00e9284e00bb8c2368ee7058a6 as hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cbab9d00e9284e00bb8c2368ee7058a6 2024-11-12T18:31:24,484 INFO [M:0;9911683f163c:39667 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cbab9d00e9284e00bb8c2368ee7058a6, entries=8, sequenceid=72, filesize=6.3 K 2024-11-12T18:31:24,486 DEBUG [M:0;9911683f163c:39667 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/68f47ec9914d44a489c65cba7bfd0bed as hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/68f47ec9914d44a489c65cba7bfd0bed 2024-11-12T18:31:24,494 INFO [M:0;9911683f163c:39667 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/68f47ec9914d44a489c65cba7bfd0bed, entries=3, sequenceid=72, filesize=5.2 K 2024-11-12T18:31:24,496 INFO [M:0;9911683f163c:39667 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=72, compaction requested=false 2024-11-12T18:31:24,497 INFO [M:0;9911683f163c:39667 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:24,497 DEBUG [M:0;9911683f163c:39667 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436284343Disabling compacts and flushes for region at 1731436284343Disabling writes for close at 1731436284343Obtaining lock to block concurrent updates at 1731436284343Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731436284343Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1731436284344 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731436284345 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731436284345Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731436284363 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731436284363Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731436284383 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731436284401 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731436284401Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731436284422 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731436284445 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731436284445Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bd399a4: reopening flushed file at 1731436284463 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d9ef71: reopening flushed file at 1731436284474 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c517bf2: reopening flushed file at 1731436284484 (+10 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=72, compaction requested=false at 1731436284496 (+12 ms)Writing region close event to WAL at 1731436284497 (+1 ms)Closed at 1731436284497 2024-11-12T18:31:24,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33251 is added to blk_1073741825_1011 (size=32665) 2024-11-12T18:31:24,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741825_1011 (size=32665) 2024-11-12T18:31:24,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34103 is added to blk_1073741825_1011 (size=32665) 2024-11-12T18:31:24,502 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T18:31:24,502 INFO [M:0;9911683f163c:39667 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-12T18:31:24,502 INFO [M:0;9911683f163c:39667 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39667 2024-11-12T18:31:24,503 INFO [M:0;9911683f163c:39667 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:31:24,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:24,605 INFO [M:0;9911683f163c:39667 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:31:24,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39667-0x1003542fc2a0000, quorum=127.0.0.1:61059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:24,610 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@653e6301{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:24,612 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@404caff2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:24,612 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:24,613 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:24,613 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:24,616 WARN [BP-1849397982-172.17.0.3-1731436275747 heartbeating to localhost/127.0.0.1:46695 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:31:24,616 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:31:24,616 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:31:24,616 WARN [BP-1849397982-172.17.0.3-1731436275747 heartbeating to localhost/127.0.0.1:46695 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1849397982-172.17.0.3-1731436275747 (Datanode Uuid b7e105da-d8b6-4e23-8534-3067521f61cf) service to localhost/127.0.0.1:46695 2024-11-12T18:31:24,617 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data5/current/BP-1849397982-172.17.0.3-1731436275747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:24,617 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data6/current/BP-1849397982-172.17.0.3-1731436275747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:24,618 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:31:24,620 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@513cab2c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:24,621 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:24,621 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:24,621 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:24,621 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:24,622 WARN [BP-1849397982-172.17.0.3-1731436275747 heartbeating to localhost/127.0.0.1:46695 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:31:24,622 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:31:24,622 WARN [BP-1849397982-172.17.0.3-1731436275747 heartbeating to localhost/127.0.0.1:46695 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1849397982-172.17.0.3-1731436275747 (Datanode Uuid 759c677e-5a6f-4018-9a5f-a97b86fdd325) service to localhost/127.0.0.1:46695 2024-11-12T18:31:24,622 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:31:24,623 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data3/current/BP-1849397982-172.17.0.3-1731436275747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:24,623 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data4/current/BP-1849397982-172.17.0.3-1731436275747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:24,624 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:31:24,626 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65462677{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:24,626 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:24,626 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:24,627 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:24,627 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:24,628 WARN [BP-1849397982-172.17.0.3-1731436275747 heartbeating to localhost/127.0.0.1:46695 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:31:24,628 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:31:24,628 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:31:24,628 WARN [BP-1849397982-172.17.0.3-1731436275747 heartbeating to localhost/127.0.0.1:46695 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1849397982-172.17.0.3-1731436275747 (Datanode Uuid 9648689b-98e9-4b66-827e-7a20ddee16d0) service to localhost/127.0.0.1:46695 2024-11-12T18:31:24,629 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data1/current/BP-1849397982-172.17.0.3-1731436275747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:24,629 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/cluster_cc1527d1-8f16-9f41-1385-0d9a0c984ebf/data/data2/current/BP-1849397982-172.17.0.3-1731436275747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:24,629 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:31:24,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58dbf239{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:31:24,639 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:24,639 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:24,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:24,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:24,648 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T18:31:24,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T18:31:24,691 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=90 (was 161), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=152 (was 148) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6422 (was 6691) 2024-11-12T18:31:24,699 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=90, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=152, ProcessCount=11, AvailableMemoryMB=6422 2024-11-12T18:31:24,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T18:31:24,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.log.dir so I do NOT create it in target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a 2024-11-12T18:31:24,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b5ddb246-22e1-6d4a-7ee8-4590c8350f90/hadoop.tmp.dir so I do NOT create it in target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a 2024-11-12T18:31:24,700 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276, deleteOnExit=true 2024-11-12T18:31:24,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T18:31:24,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/test.cache.data in system properties and HBase conf 2024-11-12T18:31:24,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T18:31:24,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.log.dir in system properties and HBase conf 2024-11-12T18:31:24,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T18:31:24,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T18:31:24,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T18:31:24,701 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-12T18:31:24,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:31:24,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:31:24,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T18:31:24,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:31:24,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T18:31:24,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T18:31:24,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:31:24,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:31:24,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T18:31:24,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/nfs.dump.dir in system properties and HBase conf 2024-11-12T18:31:24,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/java.io.tmpdir in system properties and HBase conf 2024-11-12T18:31:24,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:31:24,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T18:31:24,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T18:31:24,798 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:24,806 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:24,808 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:24,808 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:24,808 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:31:24,809 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:24,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@467f22c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:24,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a1af01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:24,926 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3677c717{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/java.io.tmpdir/jetty-localhost-41311-hadoop-hdfs-3_4_1-tests_jar-_-any-4860455930588016891/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:31:24,927 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6800152e{HTTP/1.1, (http/1.1)}{localhost:41311} 2024-11-12T18:31:24,927 INFO [Time-limited test {}] server.Server(415): Started @11219ms 2024-11-12T18:31:25,014 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:25,018 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:25,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:25,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:25,019 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:31:25,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23b354d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:25,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f3f348d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:25,137 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3950f25b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/java.io.tmpdir/jetty-localhost-43873-hadoop-hdfs-3_4_1-tests_jar-_-any-5923973823939986722/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:25,137 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@151f492e{HTTP/1.1, (http/1.1)}{localhost:43873} 2024-11-12T18:31:25,137 INFO [Time-limited test {}] server.Server(415): Started @11429ms 2024-11-12T18:31:25,139 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:31:25,176 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:25,180 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:25,183 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:25,183 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:25,183 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:25,185 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63d788f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:25,185 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d995367{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:25,235 WARN [Thread-525 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data1/current/BP-299880525-172.17.0.3-1731436284742/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:25,241 WARN [Thread-526 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data2/current/BP-299880525-172.17.0.3-1731436284742/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:25,272 WARN [Thread-504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:31:25,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x47f493d5f3c3370e with lease ID 0x902036ba7996ed1c: Processing first storage report for DS-139af72f-5423-4f31-9d04-6131b550d17c from datanode DatanodeRegistration(127.0.0.1:44989, datanodeUuid=f2b60233-d509-452c-93a8-e2b2eb8bf6e5, infoPort=41737, infoSecurePort=0, ipcPort=38187, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742) 2024-11-12T18:31:25,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47f493d5f3c3370e with lease ID 0x902036ba7996ed1c: from storage DS-139af72f-5423-4f31-9d04-6131b550d17c node DatanodeRegistration(127.0.0.1:44989, datanodeUuid=f2b60233-d509-452c-93a8-e2b2eb8bf6e5, infoPort=41737, infoSecurePort=0, ipcPort=38187, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:25,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x47f493d5f3c3370e with lease ID 0x902036ba7996ed1c: Processing first storage report for DS-23438ff8-f269-4a9d-bd4c-81fe4af958c1 from datanode DatanodeRegistration(127.0.0.1:44989, datanodeUuid=f2b60233-d509-452c-93a8-e2b2eb8bf6e5, infoPort=41737, infoSecurePort=0, ipcPort=38187, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742) 2024-11-12T18:31:25,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47f493d5f3c3370e with lease ID 0x902036ba7996ed1c: from storage DS-23438ff8-f269-4a9d-bd4c-81fe4af958c1 node DatanodeRegistration(127.0.0.1:44989, datanodeUuid=f2b60233-d509-452c-93a8-e2b2eb8bf6e5, infoPort=41737, infoSecurePort=0, ipcPort=38187, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:25,308 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2306e37f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/java.io.tmpdir/jetty-localhost-39099-hadoop-hdfs-3_4_1-tests_jar-_-any-5439181376627426907/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:25,309 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@641c2a01{HTTP/1.1, (http/1.1)}{localhost:39099} 2024-11-12T18:31:25,309 INFO [Time-limited test {}] server.Server(415): Started @11601ms 2024-11-12T18:31:25,311 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:31:25,357 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:25,365 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:25,366 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:25,366 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:25,366 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:31:25,366 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@427b8cb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:25,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fa9c833{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:25,413 WARN [Thread-560 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data3/current/BP-299880525-172.17.0.3-1731436284742/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:25,413 WARN [Thread-561 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data4/current/BP-299880525-172.17.0.3-1731436284742/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:25,438 WARN [Thread-540 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:31:25,442 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e13a9fc5c046f0c with lease ID 0x902036ba7996ed1d: Processing first storage report for DS-82e88ee4-c224-451d-91f6-b87fbd29f716 from datanode DatanodeRegistration(127.0.0.1:40111, datanodeUuid=f8428aca-e83b-4878-92a0-e572b96cc7c1, infoPort=34641, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742) 2024-11-12T18:31:25,442 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e13a9fc5c046f0c with lease ID 0x902036ba7996ed1d: from storage DS-82e88ee4-c224-451d-91f6-b87fbd29f716 node DatanodeRegistration(127.0.0.1:40111, datanodeUuid=f8428aca-e83b-4878-92a0-e572b96cc7c1, infoPort=34641, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:25,442 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e13a9fc5c046f0c with lease ID 0x902036ba7996ed1d: Processing first storage report for DS-71036a8f-c4fc-4d6c-8130-b82e784aa507 from datanode DatanodeRegistration(127.0.0.1:40111, datanodeUuid=f8428aca-e83b-4878-92a0-e572b96cc7c1, infoPort=34641, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742) 2024-11-12T18:31:25,442 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e13a9fc5c046f0c with lease ID 0x902036ba7996ed1d: from storage DS-71036a8f-c4fc-4d6c-8130-b82e784aa507 node DatanodeRegistration(127.0.0.1:40111, datanodeUuid=f8428aca-e83b-4878-92a0-e572b96cc7c1, infoPort=34641, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:25,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d5ce2d7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/java.io.tmpdir/jetty-localhost-38353-hadoop-hdfs-3_4_1-tests_jar-_-any-2130942074833784625/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:25,487 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3403c4d{HTTP/1.1, (http/1.1)}{localhost:38353} 2024-11-12T18:31:25,487 INFO [Time-limited test {}] server.Server(415): Started @11779ms 2024-11-12T18:31:25,488 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-12T18:31:25,581 WARN [Thread-586 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data5/current/BP-299880525-172.17.0.3-1731436284742/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:25,581 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data6/current/BP-299880525-172.17.0.3-1731436284742/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:25,601 WARN [Thread-575 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:31:25,604 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69b7f76ece1b2d5d with lease ID 0x902036ba7996ed1e: Processing first storage report for DS-5959bbe2-c4f0-4ef5-b9fe-990ce5723be6 from datanode DatanodeRegistration(127.0.0.1:34587, datanodeUuid=f6ff7d22-54ff-4f95-b3b3-2f72d102ac96, infoPort=38993, infoSecurePort=0, ipcPort=36127, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742) 2024-11-12T18:31:25,604 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69b7f76ece1b2d5d with lease ID 0x902036ba7996ed1e: from storage DS-5959bbe2-c4f0-4ef5-b9fe-990ce5723be6 node DatanodeRegistration(127.0.0.1:34587, datanodeUuid=f6ff7d22-54ff-4f95-b3b3-2f72d102ac96, infoPort=38993, infoSecurePort=0, ipcPort=36127, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:25,604 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69b7f76ece1b2d5d with lease ID 0x902036ba7996ed1e: Processing first storage report for DS-a473d348-45e9-4874-b4fa-c68df57677e3 from datanode DatanodeRegistration(127.0.0.1:34587, datanodeUuid=f6ff7d22-54ff-4f95-b3b3-2f72d102ac96, infoPort=38993, infoSecurePort=0, ipcPort=36127, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742) 2024-11-12T18:31:25,604 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69b7f76ece1b2d5d with lease ID 0x902036ba7996ed1e: from storage DS-a473d348-45e9-4874-b4fa-c68df57677e3 node DatanodeRegistration(127.0.0.1:34587, datanodeUuid=f6ff7d22-54ff-4f95-b3b3-2f72d102ac96, infoPort=38993, infoSecurePort=0, ipcPort=36127, storageInfo=lv=-57;cid=testClusterID;nsid=85926155;c=1731436284742), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:25,618 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a 2024-11-12T18:31:25,621 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/zookeeper_0, clientPort=53243, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T18:31:25,622 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53243 2024-11-12T18:31:25,622 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,624 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:31:25,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:31:25,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:31:25,640 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2 with version=8 2024-11-12T18:31:25,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46695/user/jenkins/test-data/75366ec9-34d6-c3a6-531e-070e93c04d98/hbase-staging 2024-11-12T18:31:25,642 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:25,642 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,642 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,642 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:25,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:25,643 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T18:31:25,643 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:25,644 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42225 2024-11-12T18:31:25,646 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42225 connecting to ZooKeeper ensemble=127.0.0.1:53243 2024-11-12T18:31:25,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:422250x0, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:25,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42225-0x100354319530000 connected 2024-11-12T18:31:25,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,673 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:25,673 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2, hbase.cluster.distributed=false 2024-11-12T18:31:25,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:25,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42225 2024-11-12T18:31:25,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42225 2024-11-12T18:31:25,676 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42225 2024-11-12T18:31:25,676 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42225 2024-11-12T18:31:25,676 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42225 2024-11-12T18:31:25,692 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:25,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,692 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:25,692 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:25,692 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:31:25,692 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:25,693 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43201 2024-11-12T18:31:25,694 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43201 connecting to ZooKeeper ensemble=127.0.0.1:53243 2024-11-12T18:31:25,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,697 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432010x0, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:25,702 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43201-0x100354319530001 connected 2024-11-12T18:31:25,702 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:25,702 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:31:25,703 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:31:25,703 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:31:25,704 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:25,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43201 2024-11-12T18:31:25,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43201 2024-11-12T18:31:25,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43201 2024-11-12T18:31:25,706 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43201 2024-11-12T18:31:25,706 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43201 2024-11-12T18:31:25,722 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:25,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,722 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:25,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:25,722 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:31:25,722 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:25,723 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46259 2024-11-12T18:31:25,724 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46259 connecting to ZooKeeper ensemble=127.0.0.1:53243 2024-11-12T18:31:25,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462590x0, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:25,731 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:462590x0, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:25,731 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46259-0x100354319530002 connected 2024-11-12T18:31:25,731 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:31:25,732 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:31:25,733 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
2024-11-12T18:31:25,734 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:25,734 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46259 2024-11-12T18:31:25,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46259 2024-11-12T18:31:25,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46259 2024-11-12T18:31:25,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46259 2024-11-12T18:31:25,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46259 2024-11-12T18:31:25,752 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:25,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,752 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:25,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:25,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:25,752 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:31:25,752 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:25,753 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44871 2024-11-12T18:31:25,754 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44871 connecting to ZooKeeper ensemble=127.0.0.1:53243 2024-11-12T18:31:25,755 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,757 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448710x0, quorum=127.0.0.1:53243, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:25,762 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:448710x0, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:25,762 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44871-0x100354319530003 connected 2024-11-12T18:31:25,762 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:31:25,763 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:31:25,764 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:31:25,765 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:25,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44871 2024-11-12T18:31:25,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44871 2024-11-12T18:31:25,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44871 2024-11-12T18:31:25,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44871 2024-11-12T18:31:25,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44871 2024-11-12T18:31:25,778 DEBUG [M:0;9911683f163c:42225 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9911683f163c:42225 2024-11-12T18:31:25,779 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9911683f163c,42225,1731436285642 2024-11-12T18:31:25,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:25,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:25,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:25,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:25,781 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42225-0x100354319530000, quorum=127.0.0.1:53243, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9911683f163c,42225,1731436285642 2024-11-12T18:31:25,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:25,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:25,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:25,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,784 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:31:25,786 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9911683f163c,42225,1731436285642 from backup master directory 2024-11-12T18:31:25,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9911683f163c,42225,1731436285642 2024-11-12T18:31:25,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:25,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:25,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:25,787 WARN [master/9911683f163c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes 
will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:31:25,787 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9911683f163c,42225,1731436285642 2024-11-12T18:31:25,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:25,794 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/hbase.id] with ID: 631c71ce-b5af-4d18-9f7a-faed3b4fbb53 2024-11-12T18:31:25,794 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/.tmp/hbase.id 2024-11-12T18:31:25,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:31:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:31:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:31:25,805 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/.tmp/hbase.id]:[hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/hbase.id] 2024-11-12T18:31:25,822 INFO [master/9911683f163c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:25,822 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T18:31:25,824 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-12T18:31:25,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:31:25,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:31:25,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:31:25,838 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:31:25,839 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T18:31:25,839 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:31:25,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is 
added to blk_1073741828_1004 (size=1189) 2024-11-12T18:31:25,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:31:25,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:31:25,857 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store 2024-11-12T18:31:25,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:31:25,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:31:25,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:31:25,868 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:25,868 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:31:25,868 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:25,868 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
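[Editor's note] The 'master:store' descriptor logged above boils down to per-family settings (versions, bloom filter type, block size, data block encoding, in-memory flag). As a rough illustration of how equivalent settings are expressed with the public HBase client descriptor builders (this is not how MasterRegion builds its descriptor internally; treat it as a sketch, and note it only constructs an in-memory descriptor object):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor sketch() {
        // 'info' family as logged: 3 versions, ROWCOL bloom, ROW_INDEX_V1 encoding,
        // 8 KB blocks, kept in-memory.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .setInMemory(true)
            .build();
        // 'proc' family as logged: 1 version, ROW bloom, 64 KB blocks
        // ('rs' and 'state' follow the same shape).
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }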
2024-11-12T18:31:25,868 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:31:25,868 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:25,868 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:25,868 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436285868Disabling compacts and flushes for region at 1731436285868Disabling writes for close at 1731436285868Writing region close event to WAL at 1731436285868Closed at 1731436285868 2024-11-12T18:31:25,870 WARN [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/.initializing 2024-11-12T18:31:25,870 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/WALs/9911683f163c,42225,1731436285642 2024-11-12T18:31:25,874 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C42225%2C1731436285642, suffix=, logDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/WALs/9911683f163c,42225,1731436285642, archiveDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/oldWALs, maxLogs=10 2024-11-12T18:31:25,875 INFO [master/9911683f163c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42225%2C1731436285642.1731436285874 2024-11-12T18:31:25,887 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/WALs/9911683f163c,42225,1731436285642/9911683f163c%2C42225%2C1731436285642.1731436285874 2024-11-12T18:31:25,888 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41737:41737),(127.0.0.1/127.0.0.1:34641:34641),(127.0.0.1/127.0.0.1:38993:38993)] 2024-11-12T18:31:25,889 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:31:25,889 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:25,890 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,890 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T18:31:25,895 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:25,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:25,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,898 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T18:31:25,898 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:25,899 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:25,899 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,902 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T18:31:25,902 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:25,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:25,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T18:31:25,905 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:25,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:25,905 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,906 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,907 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,908 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,908 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,909 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:31:25,910 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:25,913 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:31:25,914 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75197296, jitterRate=0.12052702903747559}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:31:25,915 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731436285890Initializing all the Stores at 1731436285892 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436285892Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436285892Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436285892Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436285892Cleaning up temporary data from old regions at 1731436285908 (+16 ms)Region opened successfully at 1731436285914 (+6 ms) 2024-11-12T18:31:25,915 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T18:31:25,919 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b5a8ac6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:25,921 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T18:31:25,921 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T18:31:25,921 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T18:31:25,921 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T18:31:25,922 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T18:31:25,922 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T18:31:25,922 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T18:31:25,925 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-12T18:31:25,926 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T18:31:25,928 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T18:31:25,928 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T18:31:25,929 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T18:31:25,930 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T18:31:25,931 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T18:31:25,932 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T18:31:25,933 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T18:31:25,934 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T18:31:25,935 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T18:31:25,937 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T18:31:25,939 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T18:31:25,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:25,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:25,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:25,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-12T18:31:25,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,942 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9911683f163c,42225,1731436285642, sessionid=0x100354319530000, setting cluster-up flag (Was=false) 2024-11-12T18:31:25,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,951 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T18:31:25,952 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,42225,1731436285642 2024-11-12T18:31:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:25,961 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T18:31:25,962 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,42225,1731436285642 2024-11-12T18:31:25,964 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T18:31:25,967 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:25,967 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T18:31:25,967 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
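[Editor's note] The StochasticLoadBalancer "Loaded config" line above reflects tunables that are normally supplied through the cluster Configuration. A hedged sketch of setting those same values follows; the property keys are the usual hbase.master.balancer.stochastic.* names and should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfSketch {
      public static Configuration balancerConf() {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the "Loaded config" entry above.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        return conf;
      }
    }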
2024-11-12T18:31:25,967 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9911683f163c,42225,1731436285642 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T18:31:25,968 INFO [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(746): ClusterId : 631c71ce-b5af-4d18-9f7a-faed3b4fbb53 2024-11-12T18:31:25,968 INFO [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(746): ClusterId : 631c71ce-b5af-4d18-9f7a-faed3b4fbb53 2024-11-12T18:31:25,968 DEBUG [RS:0;9911683f163c:43201 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:31:25,968 INFO [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(746): ClusterId : 631c71ce-b5af-4d18-9f7a-faed3b4fbb53 2024-11-12T18:31:25,968 DEBUG [RS:1;9911683f163c:46259 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:31:25,968 DEBUG [RS:2;9911683f163c:44871 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:31:25,971 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:25,971 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:25,971 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:25,971 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:25,971 DEBUG [RS:0;9911683f163c:43201 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:31:25,971 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9911683f163c:0, corePoolSize=10, maxPoolSize=10 2024-11-12T18:31:25,971 DEBUG [RS:0;9911683f163c:43201 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:31:25,971 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:25,972 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:25,972 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:25,972 DEBUG [RS:2;9911683f163c:44871 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:31:25,972 DEBUG [RS:2;9911683f163c:44871 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:31:25,972 DEBUG [RS:1;9911683f163c:46259 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:31:25,972 DEBUG [RS:1;9911683f163c:46259 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:31:25,975 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:25,975 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T18:31:25,975 DEBUG [RS:0;9911683f163c:43201 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:31:25,975 DEBUG [RS:2;9911683f163c:44871 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:31:25,976 DEBUG [RS:1;9911683f163c:46259 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:31:25,976 DEBUG [RS:2;9911683f163c:44871 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6197c0b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:25,976 DEBUG [RS:0;9911683f163c:43201 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ff4fd75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:25,976 DEBUG [RS:1;9911683f163c:46259 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65f12a22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:25,976 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:25,977 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE 
=> '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T18:31:25,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731436315984 2024-11-12T18:31:25,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T18:31:25,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T18:31:25,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T18:31:25,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T18:31:25,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T18:31:25,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T18:31:25,986 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:25,987 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T18:31:25,987 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T18:31:25,987 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T18:31:25,988 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T18:31:25,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T18:31:25,990 DEBUG [RS:0;9911683f163c:43201 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9911683f163c:43201 2024-11-12T18:31:25,990 INFO [RS:0;9911683f163c:43201 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:31:25,990 INFO [RS:0;9911683f163c:43201 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:31:25,990 DEBUG [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T18:31:25,991 INFO [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,42225,1731436285642 with port=43201, startcode=1731436285691 2024-11-12T18:31:25,991 DEBUG [RS:0;9911683f163c:43201 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:31:25,993 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436285989,5,FailOnTimeoutGroup] 2024-11-12T18:31:25,993 DEBUG [RS:2;9911683f163c:44871 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;9911683f163c:44871 2024-11-12T18:31:25,993 INFO [RS:2;9911683f163c:44871 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:31:25,993 INFO [RS:2;9911683f163c:44871 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:31:25,993 DEBUG [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T18:31:25,993 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436285993,5,FailOnTimeoutGroup] 2024-11-12T18:31:25,993 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:25,994 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T18:31:25,994 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:25,994 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
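[Editor's note] The "ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries above all follow one pattern: a named task run at a fixed period. A minimal sketch of that pattern with plain java.util.concurrent (not HBase's ChoreService/ScheduledChore classes) is shown below; the task body and period are illustrative.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
        // A fixed-period task, e.g. a 600000 ms cleaner pass like the
        // HFileCleaner chore logged above.
        chores.scheduleAtFixedRate(
            () -> System.out.println("cleaner pass"), 0, 600_000, TimeUnit.MILLISECONDS);
      }
    }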
2024-11-12T18:31:25,994 INFO [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,42225,1731436285642 with port=44871, startcode=1731436285751 2024-11-12T18:31:25,994 DEBUG [RS:2;9911683f163c:44871 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:31:25,996 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43175, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:31:25,996 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42225 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,43201,1731436285691 2024-11-12T18:31:25,996 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42225 {}] master.ServerManager(517): Registering regionserver=9911683f163c,43201,1731436285691 2024-11-12T18:31:25,997 DEBUG [RS:1;9911683f163c:46259 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9911683f163c:46259 2024-11-12T18:31:25,997 INFO [RS:1;9911683f163c:46259 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:31:25,997 INFO [RS:1;9911683f163c:46259 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:31:25,997 DEBUG [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T18:31:25,998 INFO [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,42225,1731436285642 with port=46259, startcode=1731436285721 2024-11-12T18:31:25,998 DEBUG [RS:1;9911683f163c:46259 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:31:25,999 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35555, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:31:26,000 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42225 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,44871,1731436285751 2024-11-12T18:31:26,000 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42225 {}] master.ServerManager(517): Registering regionserver=9911683f163c,44871,1731436285751 2024-11-12T18:31:26,000 DEBUG [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2 2024-11-12T18:31:26,000 DEBUG [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40445 2024-11-12T18:31:26,000 DEBUG [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:31:26,001 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37011, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:31:26,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:31:26,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is 
added to blk_1073741831_1007 (size=1321) 2024-11-12T18:31:26,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:31:26,003 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42225 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,46259,1731436285721 2024-11-12T18:31:26,003 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42225 {}] master.ServerManager(517): Registering regionserver=9911683f163c,46259,1731436285721 2024-11-12T18:31:26,003 DEBUG [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2 2024-11-12T18:31:26,003 DEBUG [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40445 2024-11-12T18:31:26,003 DEBUG [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:31:26,005 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T18:31:26,005 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2 2024-11-12T18:31:26,005 DEBUG [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2 2024-11-12T18:31:26,005 DEBUG [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40445 2024-11-12T18:31:26,005 DEBUG [RS:1;9911683f163c:46259 
{}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:31:26,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:31:26,011 DEBUG [RS:2;9911683f163c:44871 {}] zookeeper.ZKUtil(111): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,44871,1731436285751 2024-11-12T18:31:26,011 DEBUG [RS:0;9911683f163c:43201 {}] zookeeper.ZKUtil(111): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,43201,1731436285691 2024-11-12T18:31:26,011 WARN [RS:0;9911683f163c:43201 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:31:26,011 WARN [RS:2;9911683f163c:44871 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:31:26,011 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,46259,1731436285721] 2024-11-12T18:31:26,011 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,44871,1731436285751] 2024-11-12T18:31:26,011 INFO [RS:2;9911683f163c:44871 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:31:26,011 INFO [RS:0;9911683f163c:43201 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:31:26,011 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,43201,1731436285691] 2024-11-12T18:31:26,011 DEBUG [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,43201,1731436285691 2024-11-12T18:31:26,011 DEBUG [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,44871,1731436285751 2024-11-12T18:31:26,012 DEBUG [RS:1;9911683f163c:46259 {}] zookeeper.ZKUtil(111): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,46259,1731436285721 2024-11-12T18:31:26,012 WARN [RS:1;9911683f163c:46259 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
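[Editor's note] The regionserver registration above hinges on ephemeral znodes under /hbase/rs: each server publishes a node tied to its ZooKeeper session, and the master's RegionServerTracker reacts when such nodes appear or vanish. A minimal sketch of that ephemeral-node pattern with the raw ZooKeeper client follows (the real code goes through HBase's ZKUtil/ZKWatcher wrappers with retries; this is only illustrative).

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralNodeSketch {
      public static String register(ZooKeeper zk, String serverName)
          throws KeeperException, InterruptedException {
        // The znode disappears automatically when this session expires, which is
        // what lets the master notice a crashed regionserver.
        return zk.create("/hbase/rs/" + serverName, new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      }
    }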
2024-11-12T18:31:26,012 INFO [RS:1;9911683f163c:46259 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:31:26,012 DEBUG [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,46259,1731436285721 2024-11-12T18:31:26,016 INFO [RS:2;9911683f163c:44871 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:31:26,020 INFO [RS:0;9911683f163c:43201 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:31:26,024 INFO [RS:2;9911683f163c:44871 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:31:26,024 INFO [RS:0;9911683f163c:43201 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:31:26,024 INFO [RS:2;9911683f163c:44871 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:31:26,025 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,025 INFO [RS:0;9911683f163c:43201 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:31:26,025 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,025 INFO [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:31:26,025 INFO [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:31:26,026 INFO [RS:2;9911683f163c:44871 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:31:26,026 INFO [RS:0;9911683f163c:43201 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:31:26,026 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,026 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
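[Editor's note] The MemStoreFlusher figures above are consistent with the default sizing fractions: the low-water mark is the global limit scaled by the lower-limit fraction (880 MB x 0.95 = 836 MB). A small arithmetic sketch follows, with the standard property names noted in comments; the fractions are assumed defaults, not values read from this test's configuration.

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long maxHeapBytes = Runtime.getRuntime().maxMemory();
        // Assumed defaults: hbase.regionserver.global.memstore.size = 0.4 and
        // hbase.regionserver.global.memstore.size.lower.limit = 0.95.
        double globalFraction = 0.4;
        double lowerFraction = 0.95;
        long globalLimit = (long) (maxHeapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerFraction);
        System.out.printf("globalMemStoreLimit=%d MB, lowMark=%d MB%n",
            globalLimit >> 20, lowMark >> 20);
      }
    }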
2024-11-12T18:31:26,026 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,026 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:26,027 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:26,027 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, 
corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,027 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,028 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,028 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,028 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,028 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:26,028 DEBUG [RS:2;9911683f163c:44871 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:26,028 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,028 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:26,028 DEBUG [RS:0;9911683f163c:43201 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:26,030 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-11-12T18:31:26,030 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,44871,1731436285751-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,030 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,43201,1731436285691-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:31:26,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:31:26,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:31:26,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:31:26,032 INFO [RS:1;9911683f163c:46259 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:31:26,033 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:26,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:31:26,040 INFO [RS:1;9911683f163c:46259 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:31:26,041 INFO [RS:1;9911683f163c:46259 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:31:26,041 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T18:31:26,041 INFO [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:31:26,042 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:31:26,043 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,043 INFO [RS:1;9911683f163c:46259 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:31:26,043 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,043 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:26,043 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:26,044 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:26,045 DEBUG [RS:1;9911683f163c:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:26,045 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:31:26,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,046 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,046 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,046 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,046 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,046 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,046 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,46259,1731436285721-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-12T18:31:26,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:26,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:31:26,048 INFO [RS:2;9911683f163c:44871 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:31:26,048 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,44871,1731436285751-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,048 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:31:26,048 INFO [RS:2;9911683f163c:44871 {}] regionserver.Replication(171): 9911683f163c,44871,1731436285751 started 2024-11-12T18:31:26,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:26,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:31:26,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 
2024-11-12T18:31:26,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:26,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:31:26,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740 2024-11-12T18:31:26,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740 2024-11-12T18:31:26,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:31:26,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:31:26,056 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:31:26,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:31:26,062 INFO [RS:0;9911683f163c:43201 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:31:26,062 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,43201,1731436285691-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,063 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,063 INFO [RS:0;9911683f163c:43201 {}] regionserver.Replication(171): 9911683f163c,43201,1731436285691 started 2024-11-12T18:31:26,065 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:31:26,065 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T18:31:26,066 INFO [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,44871,1731436285751, RpcServer on 9911683f163c/172.17.0.3:44871, sessionid=0x100354319530003 2024-11-12T18:31:26,066 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63321906, jitterRate=-0.05643007159233093}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:31:26,066 DEBUG [RS:2;9911683f163c:44871 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:31:26,066 DEBUG [RS:2;9911683f163c:44871 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,44871,1731436285751 2024-11-12T18:31:26,066 DEBUG [RS:2;9911683f163c:44871 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,44871,1731436285751' 2024-11-12T18:31:26,066 DEBUG [RS:2;9911683f163c:44871 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:31:26,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731436286033Initializing all the Stores at 1731436286034 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436286034Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436286040 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436286040Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436286040Cleaning up temporary data from old regions at 1731436286056 (+16 ms)Region opened successfully at 1731436286067 (+11 ms) 2024-11-12T18:31:26,067 DEBUG [RS:2;9911683f163c:44871 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:31:26,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:31:26,067 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:31:26,067 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:31:26,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:31:26,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:31:26,068 DEBUG [RS:2;9911683f163c:44871 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:31:26,068 DEBUG [RS:2;9911683f163c:44871 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:31:26,068 DEBUG [RS:2;9911683f163c:44871 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,44871,1731436285751 2024-11-12T18:31:26,068 DEBUG [RS:2;9911683f163c:44871 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,44871,1731436285751' 2024-11-12T18:31:26,068 DEBUG [RS:2;9911683f163c:44871 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:31:26,068 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:31:26,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436286067Disabling compacts and flushes for region at 1731436286067Disabling writes for close at 1731436286067Writing region close event to WAL at 1731436286068 (+1 ms)Closed at 1731436286068 2024-11-12T18:31:26,068 DEBUG [RS:2;9911683f163c:44871 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:31:26,069 DEBUG [RS:2;9911683f163c:44871 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:31:26,069 INFO [RS:2;9911683f163c:44871 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:31:26,069 INFO [RS:2;9911683f163c:44871 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:31:26,070 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:26,070 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T18:31:26,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T18:31:26,073 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:31:26,073 INFO [RS:1;9911683f163c:46259 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:31:26,073 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,46259,1731436285721-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,074 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T18:31:26,074 INFO [RS:1;9911683f163c:46259 {}] regionserver.Replication(171): 9911683f163c,46259,1731436285721 started 2024-11-12T18:31:26,074 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T18:31:26,086 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,086 INFO [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,43201,1731436285691, RpcServer on 9911683f163c/172.17.0.3:43201, sessionid=0x100354319530001 2024-11-12T18:31:26,086 DEBUG [RS:0;9911683f163c:43201 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:31:26,086 DEBUG [RS:0;9911683f163c:43201 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,43201,1731436285691 2024-11-12T18:31:26,086 DEBUG [RS:0;9911683f163c:43201 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,43201,1731436285691' 2024-11-12T18:31:26,086 DEBUG [RS:0;9911683f163c:43201 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:31:26,087 DEBUG [RS:0;9911683f163c:43201 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:31:26,088 DEBUG [RS:0;9911683f163c:43201 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:31:26,088 DEBUG [RS:0;9911683f163c:43201 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:31:26,088 DEBUG [RS:0;9911683f163c:43201 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,43201,1731436285691 2024-11-12T18:31:26,088 DEBUG [RS:0;9911683f163c:43201 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,43201,1731436285691' 2024-11-12T18:31:26,088 DEBUG [RS:0;9911683f163c:43201 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:31:26,088 DEBUG [RS:0;9911683f163c:43201 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:31:26,089 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,089 DEBUG [RS:0;9911683f163c:43201 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:31:26,089 INFO [RS:0;9911683f163c:43201 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:31:26,089 INFO [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,46259,1731436285721, RpcServer on 9911683f163c/172.17.0.3:46259, sessionid=0x100354319530002 2024-11-12T18:31:26,089 INFO [RS:0;9911683f163c:43201 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-12T18:31:26,089 DEBUG [RS:1;9911683f163c:46259 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:31:26,089 DEBUG [RS:1;9911683f163c:46259 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,46259,1731436285721 2024-11-12T18:31:26,089 DEBUG [RS:1;9911683f163c:46259 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,46259,1731436285721' 2024-11-12T18:31:26,089 DEBUG [RS:1;9911683f163c:46259 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:31:26,090 DEBUG [RS:1;9911683f163c:46259 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:31:26,090 DEBUG [RS:1;9911683f163c:46259 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:31:26,090 DEBUG [RS:1;9911683f163c:46259 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:31:26,091 DEBUG [RS:1;9911683f163c:46259 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,46259,1731436285721 2024-11-12T18:31:26,091 DEBUG [RS:1;9911683f163c:46259 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,46259,1731436285721' 2024-11-12T18:31:26,091 DEBUG [RS:1;9911683f163c:46259 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:31:26,091 DEBUG [RS:1;9911683f163c:46259 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:31:26,092 DEBUG [RS:1;9911683f163c:46259 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:31:26,092 INFO [RS:1;9911683f163c:46259 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:31:26,092 INFO [RS:1;9911683f163c:46259 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-12T18:31:26,172 INFO [RS:2;9911683f163c:44871 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C44871%2C1731436285751, suffix=, logDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,44871,1731436285751, archiveDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/oldWALs, maxLogs=32 2024-11-12T18:31:26,174 INFO [RS:2;9911683f163c:44871 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C44871%2C1731436285751.1731436286174 2024-11-12T18:31:26,183 INFO [RS:2;9911683f163c:44871 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,44871,1731436285751/9911683f163c%2C44871%2C1731436285751.1731436286174 2024-11-12T18:31:26,184 DEBUG [RS:2;9911683f163c:44871 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38993:38993),(127.0.0.1/127.0.0.1:34641:34641),(127.0.0.1/127.0.0.1:41737:41737)] 2024-11-12T18:31:26,192 INFO [RS:0;9911683f163c:43201 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C43201%2C1731436285691, suffix=, logDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,43201,1731436285691, archiveDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/oldWALs, maxLogs=32 2024-11-12T18:31:26,193 INFO [RS:0;9911683f163c:43201 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C43201%2C1731436285691.1731436286193 2024-11-12T18:31:26,194 INFO [RS:1;9911683f163c:46259 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C46259%2C1731436285721, suffix=, logDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,46259,1731436285721, archiveDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/oldWALs, maxLogs=32 2024-11-12T18:31:26,195 INFO [RS:1;9911683f163c:46259 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C46259%2C1731436285721.1731436286195 2024-11-12T18:31:26,204 INFO [RS:0;9911683f163c:43201 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,43201,1731436285691/9911683f163c%2C43201%2C1731436285691.1731436286193 2024-11-12T18:31:26,204 INFO [RS:1;9911683f163c:46259 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,46259,1731436285721/9911683f163c%2C46259%2C1731436285721.1731436286195 2024-11-12T18:31:26,209 DEBUG [RS:0;9911683f163c:43201 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38993:38993),(127.0.0.1/127.0.0.1:34641:34641),(127.0.0.1/127.0.0.1:41737:41737)] 2024-11-12T18:31:26,212 DEBUG [RS:1;9911683f163c:46259 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38993:38993),(127.0.0.1/127.0.0.1:34641:34641),(127.0.0.1/127.0.0.1:41737:41737)] 2024-11-12T18:31:26,225 DEBUG [9911683f163c:42225 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-12T18:31:26,225 DEBUG [9911683f163c:42225 {}] balancer.BalancerClusterState(204): Hosts are {9911683f163c=0} racks are {/default-rack=0} 2024-11-12T18:31:26,228 DEBUG [9911683f163c:42225 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T18:31:26,228 DEBUG [9911683f163c:42225 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T18:31:26,228 DEBUG [9911683f163c:42225 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T18:31:26,228 DEBUG [9911683f163c:42225 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T18:31:26,228 DEBUG [9911683f163c:42225 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T18:31:26,228 DEBUG [9911683f163c:42225 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T18:31:26,228 INFO [9911683f163c:42225 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T18:31:26,228 INFO [9911683f163c:42225 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T18:31:26,228 INFO [9911683f163c:42225 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T18:31:26,228 DEBUG [9911683f163c:42225 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T18:31:26,228 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9911683f163c,46259,1731436285721 2024-11-12T18:31:26,230 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,46259,1731436285721, state=OPENING 2024-11-12T18:31:26,232 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T18:31:26,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:26,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:26,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:26,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:26,235 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:31:26,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:26,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:26,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:26,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,46259,1731436285721}] 2024-11-12T18:31:26,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:26,390 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:31:26,392 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41147, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:31:26,398 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T18:31:26,398 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:31:26,401 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C46259%2C1731436285721.meta, suffix=.meta, logDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,46259,1731436285721, archiveDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/oldWALs, maxLogs=32 2024-11-12T18:31:26,402 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C46259%2C1731436285721.meta.1731436286401.meta 2024-11-12T18:31:26,412 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/WALs/9911683f163c,46259,1731436285721/9911683f163c%2C46259%2C1731436285721.meta.1731436286401.meta 2024-11-12T18:31:26,415 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38993:38993),(127.0.0.1/127.0.0.1:41737:41737),(127.0.0.1/127.0.0.1:34641:34641)] 2024-11-12T18:31:26,417 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:31:26,417 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T18:31:26,418 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T18:31:26,418 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T18:31:26,418 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T18:31:26,418 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:26,418 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T18:31:26,418 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T18:31:26,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:31:26,422 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:31:26,422 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:26,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:31:26,423 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:31:26,423 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:26,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:31:26,425 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:31:26,425 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,425 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:26,426 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:31:26,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:31:26,427 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-12T18:31:26,427 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:31:26,428 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740 2024-11-12T18:31:26,430 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740 2024-11-12T18:31:26,431 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:31:26,431 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:31:26,432 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:31:26,434 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:31:26,435 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62857598, jitterRate=-0.06334879994392395}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:31:26,435 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T18:31:26,436 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731436286418Writing region info on filesystem at 1731436286418Initializing all the Stores at 1731436286420 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436286420Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436286420Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436286420Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436286420Cleaning up temporary data from old regions at 1731436286431 (+11 ms)Running coprocessor post-open hooks at 1731436286435 (+4 ms)Region opened successfully at 1731436286435 2024-11-12T18:31:26,437 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731436286390 2024-11-12T18:31:26,441 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T18:31:26,441 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T18:31:26,442 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,46259,1731436285721 2024-11-12T18:31:26,444 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,46259,1731436285721, state=OPEN 2024-11-12T18:31:26,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:26,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:26,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:26,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:26,446 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9911683f163c,46259,1731436285721 2024-11-12T18:31:26,446 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:26,446 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:26,446 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:26,446 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:26,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T18:31:26,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,46259,1731436285721 in 211 msec 2024-11-12T18:31:26,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T18:31:26,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 381 msec 2024-11-12T18:31:26,457 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:26,457 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T18:31:26,459 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:31:26,459 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,46259,1731436285721, seqNum=-1] 2024-11-12T18:31:26,459 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:31:26,461 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37429, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:31:26,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 503 msec 2024-11-12T18:31:26,470 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731436286470, completionTime=-1 2024-11-12T18:31:26,470 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-12T18:31:26,470 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-12T18:31:26,473 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-12T18:31:26,473 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731436346473 2024-11-12T18:31:26,473 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731436406473 2024-11-12T18:31:26,473 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-12T18:31:26,473 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42225,1731436285642-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,473 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42225,1731436285642-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,473 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42225,1731436285642-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,474 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9911683f163c:42225, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,474 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,474 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,476 DEBUG [master/9911683f163c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T18:31:26,480 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.693sec 2024-11-12T18:31:26,480 INFO [master/9911683f163c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T18:31:26,480 INFO [master/9911683f163c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T18:31:26,480 INFO [master/9911683f163c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T18:31:26,480 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-12T18:31:26,480 INFO [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T18:31:26,480 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42225,1731436285642-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-12T18:31:26,480 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42225,1731436285642-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T18:31:26,483 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T18:31:26,483 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T18:31:26,483 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42225,1731436285642-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:26,569 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32980091, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:31:26,569 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9911683f163c,42225,-1 for getting cluster id 2024-11-12T18:31:26,569 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T18:31:26,571 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '631c71ce-b5af-4d18-9f7a-faed3b4fbb53' 2024-11-12T18:31:26,571 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T18:31:26,571 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "631c71ce-b5af-4d18-9f7a-faed3b4fbb53" 2024-11-12T18:31:26,572 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b7542a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:31:26,572 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9911683f163c,42225,-1] 2024-11-12T18:31:26,572 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T18:31:26,572 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:26,574 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55968, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T18:31:26,575 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c15c469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:31:26,575 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:31:26,577 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=9911683f163c,46259,1731436285721, seqNum=-1] 2024-11-12T18:31:26,578 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:31:26,580 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35510, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:31:26,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9911683f163c,42225,1731436285642 2024-11-12T18:31:26,583 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T18:31:26,585 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 9911683f163c,42225,1731436285642 2024-11-12T18:31:26,585 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@56ffef4b 2024-11-12T18:31:26,585 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T18:31:26,587 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55984, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T18:31:26,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:31:26,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-12T18:31:26,593 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T18:31:26,593 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-12T18:31:26,595 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T18:31:26,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:26,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741837_1013 (size=392) 
2024-11-12T18:31:26,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741837_1013 (size=392) 2024-11-12T18:31:26,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741837_1013 (size=392) 2024-11-12T18:31:26,609 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5a4d8b0fa1c9dd88cc008a89a0174d7d, NAME => 'TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2 2024-11-12T18:31:26,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741838_1014 (size=51) 2024-11-12T18:31:26,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741838_1014 (size=51) 2024-11-12T18:31:26,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741838_1014 (size=51) 2024-11-12T18:31:26,620 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:26,620 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 5a4d8b0fa1c9dd88cc008a89a0174d7d, disabling compactions & flushes 2024-11-12T18:31:26,620 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 2024-11-12T18:31:26,620 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 2024-11-12T18:31:26,620 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. after waiting 0 ms 2024-11-12T18:31:26,620 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 2024-11-12T18:31:26,620 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 
2024-11-12T18:31:26,620 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5a4d8b0fa1c9dd88cc008a89a0174d7d: Waiting for close lock at 1731436286620Disabling compacts and flushes for region at 1731436286620Disabling writes for close at 1731436286620Writing region close event to WAL at 1731436286620Closed at 1731436286620 2024-11-12T18:31:26,622 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T18:31:26,623 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731436286622"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731436286622"}]},"ts":"1731436286622"} 2024-11-12T18:31:26,626 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-12T18:31:26,628 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T18:31:26,628 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436286628"}]},"ts":"1731436286628"} 2024-11-12T18:31:26,631 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-12T18:31:26,631 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {9911683f163c=0} racks are {/default-rack=0} 2024-11-12T18:31:26,632 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T18:31:26,632 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T18:31:26,632 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T18:31:26,632 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T18:31:26,632 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T18:31:26,632 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T18:31:26,632 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T18:31:26,632 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T18:31:26,632 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T18:31:26,632 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T18:31:26,632 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5a4d8b0fa1c9dd88cc008a89a0174d7d, ASSIGN}] 2024-11-12T18:31:26,634 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5a4d8b0fa1c9dd88cc008a89a0174d7d, ASSIGN 2024-11-12T18:31:26,636 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5a4d8b0fa1c9dd88cc008a89a0174d7d, ASSIGN; state=OFFLINE, location=9911683f163c,44871,1731436285751; forceNewPlan=false, retain=false 2024-11-12T18:31:26,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:26,786 INFO [9911683f163c:42225 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-12T18:31:26,787 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5a4d8b0fa1c9dd88cc008a89a0174d7d, regionState=OPENING, regionLocation=9911683f163c,44871,1731436285751 2024-11-12T18:31:26,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5a4d8b0fa1c9dd88cc008a89a0174d7d, ASSIGN because future has completed 2024-11-12T18:31:26,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a4d8b0fa1c9dd88cc008a89a0174d7d, server=9911683f163c,44871,1731436285751}] 2024-11-12T18:31:26,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:26,946 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:31:26,948 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41153, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:31:26,952 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 
2024-11-12T18:31:26,952 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5a4d8b0fa1c9dd88cc008a89a0174d7d, NAME => 'TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:31:26,953 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,953 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:26,953 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,953 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,955 INFO [StoreOpener-5a4d8b0fa1c9dd88cc008a89a0174d7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,957 INFO [StoreOpener-5a4d8b0fa1c9dd88cc008a89a0174d7d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a4d8b0fa1c9dd88cc008a89a0174d7d columnFamilyName cf 2024-11-12T18:31:26,957 DEBUG [StoreOpener-5a4d8b0fa1c9dd88cc008a89a0174d7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:26,957 INFO [StoreOpener-5a4d8b0fa1c9dd88cc008a89a0174d7d-1 {}] regionserver.HStore(327): Store=5a4d8b0fa1c9dd88cc008a89a0174d7d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:26,957 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,958 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/default/TestHBaseWalOnEC/5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,959 DEBUG 
[RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/default/TestHBaseWalOnEC/5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,959 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,959 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,961 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,963 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/default/TestHBaseWalOnEC/5a4d8b0fa1c9dd88cc008a89a0174d7d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:31:26,964 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5a4d8b0fa1c9dd88cc008a89a0174d7d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61540147, jitterRate=-0.08298034965991974}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T18:31:26,964 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:26,965 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5a4d8b0fa1c9dd88cc008a89a0174d7d: Running coprocessor pre-open hook at 1731436286953Writing region info on filesystem at 1731436286953Initializing all the Stores at 1731436286955 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436286955Cleaning up temporary data from old regions at 1731436286959 (+4 ms)Running coprocessor post-open hooks at 1731436286964 (+5 ms)Region opened successfully at 1731436286965 (+1 ms) 2024-11-12T18:31:26,966 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d., pid=6, masterSystemTime=1731436286945 2024-11-12T18:31:26,970 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 2024-11-12T18:31:26,970 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 
2024-11-12T18:31:26,971 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5a4d8b0fa1c9dd88cc008a89a0174d7d, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,44871,1731436285751 2024-11-12T18:31:26,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a4d8b0fa1c9dd88cc008a89a0174d7d, server=9911683f163c,44871,1731436285751 because future has completed 2024-11-12T18:31:26,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T18:31:26,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5a4d8b0fa1c9dd88cc008a89a0174d7d, server=9911683f163c,44871,1731436285751 in 185 msec 2024-11-12T18:31:26,984 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T18:31:26,984 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5a4d8b0fa1c9dd88cc008a89a0174d7d, ASSIGN in 348 msec 2024-11-12T18:31:26,985 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T18:31:26,986 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436286985"}]},"ts":"1731436286985"} 2024-11-12T18:31:26,989 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-12T18:31:26,991 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T18:31:26,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 403 msec 2024-11-12T18:31:27,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:27,228 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T18:31:27,228 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-12T18:31:27,228 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T18:31:27,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-12T18:31:27,232 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T18:31:27,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-12T18:31:27,239 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d., hostname=9911683f163c,44871,1731436285751, seqNum=2] 2024-11-12T18:31:27,239 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:31:27,242 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41232, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:31:27,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-12T18:31:27,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-12T18:31:27,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T18:31:27,248 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-12T18:31:27,250 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T18:31:27,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T18:31:27,279 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:31:27,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:27,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:27,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:27,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T18:31:27,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44871 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-12T18:31:27,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 
2024-11-12T18:31:27,405 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5a4d8b0fa1c9dd88cc008a89a0174d7d 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-12T18:31:27,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/default/TestHBaseWalOnEC/5a4d8b0fa1c9dd88cc008a89a0174d7d/.tmp/cf/3d76d9e3df0b430fb9e06e410a7ea3b6 is 36, key is row/cf:cq/1731436287242/Put/seqid=0 2024-11-12T18:31:27,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741839_1015 (size=4787) 2024-11-12T18:31:27,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741839_1015 (size=4787) 2024-11-12T18:31:27,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741839_1015 (size=4787) 2024-11-12T18:31:27,434 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/default/TestHBaseWalOnEC/5a4d8b0fa1c9dd88cc008a89a0174d7d/.tmp/cf/3d76d9e3df0b430fb9e06e410a7ea3b6 2024-11-12T18:31:27,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/default/TestHBaseWalOnEC/5a4d8b0fa1c9dd88cc008a89a0174d7d/.tmp/cf/3d76d9e3df0b430fb9e06e410a7ea3b6 as hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/default/TestHBaseWalOnEC/5a4d8b0fa1c9dd88cc008a89a0174d7d/cf/3d76d9e3df0b430fb9e06e410a7ea3b6 2024-11-12T18:31:27,451 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/default/TestHBaseWalOnEC/5a4d8b0fa1c9dd88cc008a89a0174d7d/cf/3d76d9e3df0b430fb9e06e410a7ea3b6, entries=1, sequenceid=5, filesize=4.7 K 2024-11-12T18:31:27,452 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 5a4d8b0fa1c9dd88cc008a89a0174d7d in 47ms, sequenceid=5, compaction requested=false 2024-11-12T18:31:27,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 5a4d8b0fa1c9dd88cc008a89a0174d7d: 2024-11-12T18:31:27,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 
2024-11-12T18:31:27,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-12T18:31:27,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-12T18:31:27,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T18:31:27,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 205 msec 2024-11-12T18:31:27,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 215 msec 2024-11-12T18:31:27,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T18:31:27,568 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T18:31:27,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T18:31:27,573 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T18:31:27,573 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:27,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:27,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:27,573 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T18:31:27,573 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1155378738, stopped=false 2024-11-12T18:31:27,574 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-12T18:31:27,574 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9911683f163c,42225,1731436285642 2024-11-12T18:31:27,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:27,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:27,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:27,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:27,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:27,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:27,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:27,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:27,576 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:31:27,576 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T18:31:27,576 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:27,576 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:27,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:27,577 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:27,577 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,43201,1731436285691' ***** 2024-11-12T18:31:27,577 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:27,577 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:27,577 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:31:27,577 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,46259,1731436285721' ***** 2024-11-12T18:31:27,577 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:31:27,577 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,44871,1731436285751' ***** 2024-11-12T18:31:27,577 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:31:27,577 INFO [RS:1;9911683f163c:46259 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:31:27,577 INFO [RS:0;9911683f163c:43201 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:31:27,577 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:31:27,577 INFO [RS:1;9911683f163c:46259 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:31:27,577 INFO [RS:1;9911683f163c:46259 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:31:27,577 INFO [RS:0;9911683f163c:43201 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:31:27,577 INFO [RS:2;9911683f163c:44871 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:31:27,578 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:31:27,578 INFO [RS:2;9911683f163c:44871 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:31:27,578 INFO [RS:2;9911683f163c:44871 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-12T18:31:27,578 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:31:27,578 INFO [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(3091): Received CLOSE for 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:27,578 INFO [RS:0;9911683f163c:43201 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:31:27,578 INFO [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,46259,1731436285721 2024-11-12T18:31:27,578 INFO [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,43201,1731436285691 2024-11-12T18:31:27,578 INFO [RS:1;9911683f163c:46259 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:27,578 INFO [RS:0;9911683f163c:43201 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:27,578 INFO [RS:1;9911683f163c:46259 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;9911683f163c:46259. 2024-11-12T18:31:27,578 INFO [RS:0;9911683f163c:43201 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9911683f163c:43201. 2024-11-12T18:31:27,578 INFO [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,44871,1731436285751 2024-11-12T18:31:27,579 INFO [RS:2;9911683f163c:44871 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:27,579 DEBUG [RS:0;9911683f163c:43201 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:27,579 DEBUG [RS:1;9911683f163c:46259 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at 
org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:27,579 DEBUG [RS:0;9911683f163c:43201 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:27,579 INFO [RS:2;9911683f163c:44871 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;9911683f163c:44871. 2024-11-12T18:31:27,579 DEBUG [RS:1;9911683f163c:46259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:27,579 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5a4d8b0fa1c9dd88cc008a89a0174d7d, disabling compactions & flushes 2024-11-12T18:31:27,579 INFO [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,43201,1731436285691; all regions closed. 2024-11-12T18:31:27,579 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 2024-11-12T18:31:27,579 DEBUG [RS:2;9911683f163c:44871 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:27,579 INFO [RS:1;9911683f163c:46259 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:31:27,579 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 
2024-11-12T18:31:27,579 DEBUG [RS:2;9911683f163c:44871 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:27,579 INFO [RS:1;9911683f163c:46259 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:31:27,579 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. after waiting 0 ms 2024-11-12T18:31:27,579 INFO [RS:1;9911683f163c:46259 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T18:31:27,579 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 2024-11-12T18:31:27,579 INFO [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T18:31:27,579 DEBUG [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(1325): Online Regions={5a4d8b0fa1c9dd88cc008a89a0174d7d=TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d.} 2024-11-12T18:31:27,579 INFO [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T18:31:27,579 DEBUG [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(1351): Waiting on 5a4d8b0fa1c9dd88cc008a89a0174d7d 2024-11-12T18:31:27,579 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:27,580 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:27,580 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:27,580 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:27,580 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:27,581 INFO [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T18:31:27,581 DEBUG [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-12T18:31:27,582 DEBUG [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T18:31:27,582 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:31:27,582 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:31:27,582 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:31:27,582 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:31:27,582 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:31:27,582 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-12T18:31:27,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741834_1010 
(size=93) 2024-11-12T18:31:27,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741834_1010 (size=93) 2024-11-12T18:31:27,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741834_1010 (size=93) 2024-11-12T18:31:27,590 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/default/TestHBaseWalOnEC/5a4d8b0fa1c9dd88cc008a89a0174d7d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-12T18:31:27,590 DEBUG [RS:0;9911683f163c:43201 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/oldWALs 2024-11-12T18:31:27,590 INFO [RS:0;9911683f163c:43201 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C43201%2C1731436285691:(num 1731436286193) 2024-11-12T18:31:27,590 DEBUG [RS:0;9911683f163c:43201 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:27,590 INFO [RS:0;9911683f163c:43201 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:27,590 INFO [RS:0;9911683f163c:43201 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:31:27,591 INFO [RS:0;9911683f163c:43201 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T18:31:27,591 INFO [RS:0;9911683f163c:43201 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:31:27,591 INFO [RS:0;9911683f163c:43201 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:31:27,591 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:31:27,591 INFO [RS:0;9911683f163c:43201 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T18:31:27,591 INFO [RS:0;9911683f163c:43201 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:31:27,591 INFO [RS:0;9911683f163c:43201 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43201 2024-11-12T18:31:27,591 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 2024-11-12T18:31:27,591 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5a4d8b0fa1c9dd88cc008a89a0174d7d: Waiting for close lock at 1731436287579Running coprocessor pre-close hooks at 1731436287579Disabling compacts and flushes for region at 1731436287579Disabling writes for close at 1731436287579Writing region close event to WAL at 1731436287582 (+3 ms)Running coprocessor post-close hooks at 1731436287591 (+9 ms)Closed at 1731436287591 2024-11-12T18:31:27,592 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d. 
2024-11-12T18:31:27,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,43201,1731436285691
2024-11-12T18:31:27,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-12T18:31:27,593 INFO [RS:0;9911683f163c:43201 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-12T18:31:27,595 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,43201,1731436285691]
2024-11-12T18:31:27,596 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,43201,1731436285691 already deleted, retry=false
2024-11-12T18:31:27,596 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,43201,1731436285691 expired; onlineServers=2
2024-11-12T18:31:27,606 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/.tmp/info/bd4880a70bad471a89a061ca9ee9f699 is 153, key is TestHBaseWalOnEC,,1731436286588.5a4d8b0fa1c9dd88cc008a89a0174d7d./info:regioninfo/1731436286971/Put/seqid=0
2024-11-12T18:31:27,644 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-12T18:31:27,644 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-12T18:31:27,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741840_1016 (size=6637)
2024-11-12T18:31:27,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741840_1016 (size=6637)
2024-11-12T18:31:27,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741840_1016 (size=6637)
2024-11-12T18:31:27,654 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-12T18:31:27,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T18:31:27,695 INFO [RS:0;9911683f163c:43201 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-12T18:31:27,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43201-0x100354319530001, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T18:31:27,695 INFO [RS:0;9911683f163c:43201 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,43201,1731436285691; zookeeper connection closed.
2024-11-12T18:31:27,695 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@26e7bc24 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@26e7bc24
2024-11-12T18:31:27,779 INFO [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,44871,1731436285751; all regions closed.
2024-11-12T18:31:27,780 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:27,780 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:27,780 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:27,780 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:27,781 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:27,782 DEBUG [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-11-12T18:31:27,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741833_1009 (size=1298)
2024-11-12T18:31:27,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741833_1009 (size=1298)
2024-11-12T18:31:27,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741833_1009 (size=1298)
2024-11-12T18:31:27,788 DEBUG [RS:2;9911683f163c:44871 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/oldWALs
2024-11-12T18:31:27,788 INFO [RS:2;9911683f163c:44871 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C44871%2C1731436285751:(num 1731436286174)
2024-11-12T18:31:27,788 DEBUG [RS:2;9911683f163c:44871 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-12T18:31:27,788 INFO [RS:2;9911683f163c:44871 {}] regionserver.LeaseManager(133): Closed leases
2024-11-12T18:31:27,788 INFO [RS:2;9911683f163c:44871 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-12T18:31:27,788 INFO [RS:2;9911683f163c:44871 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-12T18:31:27,788 INFO [RS:2;9911683f163c:44871 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-12T18:31:27,788 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-12T18:31:27,788 INFO [RS:2;9911683f163c:44871 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-12T18:31:27,788 INFO [RS:2;9911683f163c:44871 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-12T18:31:27,788 INFO [RS:2;9911683f163c:44871 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-12T18:31:27,789 INFO [RS:2;9911683f163c:44871 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44871
2024-11-12T18:31:27,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,44871,1731436285751
2024-11-12T18:31:27,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-12T18:31:27,791 INFO [RS:2;9911683f163c:44871 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-12T18:31:27,793 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,44871,1731436285751]
2024-11-12T18:31:27,794 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,44871,1731436285751 already deleted, retry=false
2024-11-12T18:31:27,794 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,44871,1731436285751 expired; onlineServers=1
2024-11-12T18:31:27,893 INFO [RS:2;9911683f163c:44871 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-12T18:31:27,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T18:31:27,893 INFO [RS:2;9911683f163c:44871 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,44871,1731436285751; zookeeper connection closed.
2024-11-12T18:31:27,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44871-0x100354319530003, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:27,893 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5c84fc59 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5c84fc59 2024-11-12T18:31:27,982 DEBUG [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T18:31:28,051 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/.tmp/info/bd4880a70bad471a89a061ca9ee9f699 2024-11-12T18:31:28,052 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-12T18:31:28,052 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-12T18:31:28,074 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/.tmp/ns/726deec07a3941bda7ebfb6075f96f3b is 43, key is default/ns:d/1731436286462/Put/seqid=0 2024-11-12T18:31:28,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741841_1017 (size=5153) 2024-11-12T18:31:28,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741841_1017 (size=5153) 2024-11-12T18:31:28,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741841_1017 (size=5153) 2024-11-12T18:31:28,083 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/.tmp/ns/726deec07a3941bda7ebfb6075f96f3b 2024-11-12T18:31:28,107 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/.tmp/table/009b53dde5344d269990d4c57af1378c is 52, key is TestHBaseWalOnEC/table:state/1731436286985/Put/seqid=0 2024-11-12T18:31:28,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741842_1018 (size=5249) 2024-11-12T18:31:28,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741842_1018 (size=5249) 2024-11-12T18:31:28,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741842_1018 (size=5249) 2024-11-12T18:31:28,117 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 
B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/.tmp/table/009b53dde5344d269990d4c57af1378c 2024-11-12T18:31:28,125 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/.tmp/info/bd4880a70bad471a89a061ca9ee9f699 as hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/info/bd4880a70bad471a89a061ca9ee9f699 2024-11-12T18:31:28,134 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/info/bd4880a70bad471a89a061ca9ee9f699, entries=10, sequenceid=11, filesize=6.5 K 2024-11-12T18:31:28,136 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/.tmp/ns/726deec07a3941bda7ebfb6075f96f3b as hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/ns/726deec07a3941bda7ebfb6075f96f3b 2024-11-12T18:31:28,145 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/ns/726deec07a3941bda7ebfb6075f96f3b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T18:31:28,146 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/.tmp/table/009b53dde5344d269990d4c57af1378c as hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/table/009b53dde5344d269990d4c57af1378c 2024-11-12T18:31:28,156 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/table/009b53dde5344d269990d4c57af1378c, entries=2, sequenceid=11, filesize=5.1 K 2024-11-12T18:31:28,158 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 576ms, sequenceid=11, compaction requested=false 2024-11-12T18:31:28,165 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T18:31:28,166 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:31:28,166 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-12T18:31:28,166 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436287581Running coprocessor pre-close hooks at 1731436287581Disabling compacts and flushes for region at 1731436287581Disabling writes for close at 1731436287582 (+1 ms)Obtaining lock to block concurrent updates at 1731436287582Preparing flush snapshotting stores in 1588230740 at 1731436287582Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731436287583 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731436287583Flushing 1588230740/info: creating writer at 1731436287584 (+1 ms)Flushing 1588230740/info: appending metadata at 1731436287605 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731436287605Flushing 1588230740/ns: creating writer at 1731436288059 (+454 ms)Flushing 1588230740/ns: appending metadata at 1731436288074 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731436288074Flushing 1588230740/table: creating writer at 1731436288091 (+17 ms)Flushing 1588230740/table: appending metadata at 1731436288107 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731436288107Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@325adedb: reopening flushed file at 1731436288124 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5abc54b7: reopening flushed file at 1731436288134 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6469973b: reopening flushed file at 1731436288145 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 576ms, sequenceid=11, compaction requested=false at 1731436288158 (+13 ms)Writing region close event to WAL at 1731436288160 (+2 ms)Running coprocessor post-close hooks at 1731436288166 (+6 ms)Closed at 1731436288166 2024-11-12T18:31:28,167 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T18:31:28,182 INFO [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,46259,1731436285721; all regions closed. 
2024-11-12T18:31:28,183 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,183 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,183 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,184 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,184 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741836_1012 (size=2751) 2024-11-12T18:31:28,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741836_1012 (size=2751) 2024-11-12T18:31:28,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741836_1012 (size=2751) 2024-11-12T18:31:28,192 DEBUG [RS:1;9911683f163c:46259 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/oldWALs 2024-11-12T18:31:28,192 INFO [RS:1;9911683f163c:46259 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C46259%2C1731436285721.meta:.meta(num 1731436286401) 2024-11-12T18:31:28,193 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,193 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,193 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,194 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,194 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741835_1011 (size=93) 2024-11-12T18:31:28,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741835_1011 (size=93) 2024-11-12T18:31:28,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741835_1011 (size=93) 2024-11-12T18:31:28,208 DEBUG [RS:1;9911683f163c:46259 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/oldWALs 2024-11-12T18:31:28,208 INFO [RS:1;9911683f163c:46259 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C46259%2C1731436285721:(num 1731436286195) 2024-11-12T18:31:28,208 DEBUG [RS:1;9911683f163c:46259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:28,208 INFO [RS:1;9911683f163c:46259 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:28,208 INFO [RS:1;9911683f163c:46259 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:31:28,208 INFO [RS:1;9911683f163c:46259 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T18:31:28,208 INFO [RS:1;9911683f163c:46259 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:31:28,208 INFO [RS:1;9911683f163c:46259 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46259 2024-11-12T18:31:28,209 INFO 
[regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:31:28,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:31:28,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,46259,1731436285721 2024-11-12T18:31:28,211 INFO [RS:1;9911683f163c:46259 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:31:28,212 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,46259,1731436285721] 2024-11-12T18:31:28,214 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,46259,1731436285721 already deleted, retry=false 2024-11-12T18:31:28,214 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,46259,1731436285721 expired; onlineServers=0 2024-11-12T18:31:28,214 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9911683f163c,42225,1731436285642' ***** 2024-11-12T18:31:28,214 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T18:31:28,214 INFO [M:0;9911683f163c:42225 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:28,214 INFO [M:0;9911683f163c:42225 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:31:28,214 DEBUG [M:0;9911683f163c:42225 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T18:31:28,214 DEBUG [M:0;9911683f163c:42225 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T18:31:28,214 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436285989 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436285989,5,FailOnTimeoutGroup] 2024-11-12T18:31:28,214 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-12T18:31:28,214 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436285993 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436285993,5,FailOnTimeoutGroup] 2024-11-12T18:31:28,214 INFO [M:0;9911683f163c:42225 {}] hbase.ChoreService(370): Chore service for: master/9911683f163c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T18:31:28,214 INFO [M:0;9911683f163c:42225 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:31:28,215 DEBUG [M:0;9911683f163c:42225 {}] master.HMaster(1795): Stopping service threads 2024-11-12T18:31:28,215 INFO [M:0;9911683f163c:42225 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T18:31:28,215 INFO [M:0;9911683f163c:42225 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:31:28,215 INFO [M:0;9911683f163c:42225 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T18:31:28,215 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. 
terminating. 2024-11-12T18:31:28,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:28,218 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-12T18:31:28,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,218 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-12T18:31:28,219 INFO [M:0;9911683f163c:42225 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/.lastflushedseqids 2024-11-12T18:31:28,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741843_1019 (size=127) 2024-11-12T18:31:28,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741843_1019 (size=127) 2024-11-12T18:31:28,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741843_1019 (size=127) 2024-11-12T18:31:28,237 INFO [M:0;9911683f163c:42225 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T18:31:28,237 INFO [M:0;9911683f163c:42225 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T18:31:28,237 DEBUG [M:0;9911683f163c:42225 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:31:28,237 INFO [M:0;9911683f163c:42225 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:28,237 DEBUG [M:0;9911683f163c:42225 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:28,237 DEBUG [M:0;9911683f163c:42225 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:31:28,237 DEBUG [M:0;9911683f163c:42225 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:31:28,238 INFO [M:0;9911683f163c:42225 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-11-12T18:31:28,257 DEBUG [M:0;9911683f163c:42225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ae03ea3cd3b84057ad17d964168f6d64 is 82, key is hbase:meta,,1/info:regioninfo/1731436286442/Put/seqid=0 2024-11-12T18:31:28,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741844_1020 (size=5672) 2024-11-12T18:31:28,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741844_1020 (size=5672) 2024-11-12T18:31:28,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741844_1020 (size=5672) 2024-11-12T18:31:28,273 INFO [M:0;9911683f163c:42225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ae03ea3cd3b84057ad17d964168f6d64 2024-11-12T18:31:28,305 DEBUG [M:0;9911683f163c:42225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c4cb912e3ce74715ab38454cb316830d is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731436286993/Put/seqid=0 2024-11-12T18:31:28,310 WARN [IPC Server handler 3 on default port 40445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T18:31:28,310 WARN [IPC Server handler 3 on default port 40445 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T18:31:28,310 WARN [IPC Server handler 3 on default port 40445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T18:31:28,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:28,313 INFO [RS:1;9911683f163c:46259 {}] 
hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:31:28,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x100354319530002, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:28,313 INFO [RS:1;9911683f163c:46259 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,46259,1731436285721; zookeeper connection closed. 2024-11-12T18:31:28,316 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a17fc32 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a17fc32 2024-11-12T18:31:28,317 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-12T18:31:28,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741845_1021 (size=6439) 2024-11-12T18:31:28,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741845_1021 (size=6439) 2024-11-12T18:31:28,326 INFO [M:0;9911683f163c:42225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c4cb912e3ce74715ab38454cb316830d 2024-11-12T18:31:28,349 DEBUG [M:0;9911683f163c:42225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7141a8b5d6174b3eb58b408037dc5faa is 69, key is 9911683f163c,43201,1731436285691/rs:state/1731436285997/Put/seqid=0 2024-11-12T18:31:28,351 WARN [IPC Server handler 0 on default port 40445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T18:31:28,351 WARN [IPC Server handler 0 on default port 40445 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T18:31:28,351 WARN [IPC Server handler 0 on default port 40445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T18:31:28,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741846_1022 (size=5294) 2024-11-12T18:31:28,358 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741846_1022 (size=5294) 2024-11-12T18:31:28,360 INFO [M:0;9911683f163c:42225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7141a8b5d6174b3eb58b408037dc5faa 2024-11-12T18:31:28,368 DEBUG [M:0;9911683f163c:42225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ae03ea3cd3b84057ad17d964168f6d64 as hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ae03ea3cd3b84057ad17d964168f6d64 2024-11-12T18:31:28,377 INFO [M:0;9911683f163c:42225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ae03ea3cd3b84057ad17d964168f6d64, entries=8, sequenceid=72, filesize=5.5 K 2024-11-12T18:31:28,379 DEBUG [M:0;9911683f163c:42225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c4cb912e3ce74715ab38454cb316830d as hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c4cb912e3ce74715ab38454cb316830d 2024-11-12T18:31:28,387 INFO [M:0;9911683f163c:42225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c4cb912e3ce74715ab38454cb316830d, entries=8, sequenceid=72, filesize=6.3 K 2024-11-12T18:31:28,389 DEBUG [M:0;9911683f163c:42225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7141a8b5d6174b3eb58b408037dc5faa as hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7141a8b5d6174b3eb58b408037dc5faa 2024-11-12T18:31:28,396 INFO [M:0;9911683f163c:42225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40445/user/jenkins/test-data/6dfb0c27-ed23-bc07-b5ca-e26576d233e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7141a8b5d6174b3eb58b408037dc5faa, entries=3, sequenceid=72, filesize=5.2 K 2024-11-12T18:31:28,398 INFO [M:0;9911683f163c:42225 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 161ms, sequenceid=72, compaction requested=false 2024-11-12T18:31:28,402 INFO [M:0;9911683f163c:42225 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:31:28,402 DEBUG [M:0;9911683f163c:42225 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436288237Disabling compacts and flushes for region at 1731436288237Disabling writes for close at 1731436288237Obtaining lock to block concurrent updates at 1731436288238 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731436288238Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1731436288239 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731436288240 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731436288240Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731436288257 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731436288257Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731436288281 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731436288304 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731436288304Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731436288333 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731436288349 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731436288349Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fafe45d: reopening flushed file at 1731436288367 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@788b4022: reopening flushed file at 1731436288378 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@643314ae: reopening flushed file at 1731436288388 (+10 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 161ms, sequenceid=72, compaction requested=false at 1731436288398 (+10 ms)Writing region close event to WAL at 1731436288401 (+3 ms)Closed at 1731436288401 2024-11-12T18:31:28,403 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,403 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,403 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,403 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,403 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:28,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34587 is added to blk_1073741830_1006 (size=32674) 2024-11-12T18:31:28,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741830_1006 (size=32674) 2024-11-12T18:31:28,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40111 is added to blk_1073741830_1006 (size=32674) 2024-11-12T18:31:28,408 INFO [M:0;9911683f163c:42225 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-12T18:31:28,408 INFO [M:0;9911683f163c:42225 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42225
2024-11-12T18:31:28,408 INFO [M:0;9911683f163c:42225 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-12T18:31:28,408 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-12T18:31:28,510 INFO [M:0;9911683f163c:42225 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-12T18:31:28,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T18:31:28,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42225-0x100354319530000, quorum=127.0.0.1:53243, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T18:31:28,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d5ce2d7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T18:31:28,517 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3403c4d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T18:31:28,517 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T18:31:28,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fa9c833{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T18:31:28,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@427b8cb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.log.dir/,STOPPED}
2024-11-12T18:31:28,519 WARN [BP-299880525-172.17.0.3-1731436284742 heartbeating to localhost/127.0.0.1:40445 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-12T18:31:28,519 WARN [BP-299880525-172.17.0.3-1731436284742 heartbeating to localhost/127.0.0.1:40445 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-299880525-172.17.0.3-1731436284742 (Datanode Uuid f6ff7d22-54ff-4f95-b3b3-2f72d102ac96) service to localhost/127.0.0.1:40445
2024-11-12T18:31:28,519 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-12T18:31:28,519 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T18:31:28,520 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data5/current/BP-299880525-172.17.0.3-1731436284742 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T18:31:28,521 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data6/current/BP-299880525-172.17.0.3-1731436284742 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T18:31:28,521 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T18:31:28,526 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2306e37f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T18:31:28,526 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@641c2a01{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T18:31:28,526 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T18:31:28,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d995367{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T18:31:28,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63d788f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.log.dir/,STOPPED}
2024-11-12T18:31:28,530 WARN [BP-299880525-172.17.0.3-1731436284742 heartbeating to localhost/127.0.0.1:40445 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-12T18:31:28,530 WARN [BP-299880525-172.17.0.3-1731436284742 heartbeating to localhost/127.0.0.1:40445 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-299880525-172.17.0.3-1731436284742 (Datanode Uuid f8428aca-e83b-4878-92a0-e572b96cc7c1) service to localhost/127.0.0.1:40445
2024-11-12T18:31:28,530 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-12T18:31:28,530 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T18:31:28,531 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data3/current/BP-299880525-172.17.0.3-1731436284742 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T18:31:28,531 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data4/current/BP-299880525-172.17.0.3-1731436284742 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T18:31:28,532 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T18:31:28,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3950f25b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T18:31:28,537 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@151f492e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T18:31:28,538 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T18:31:28,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f3f348d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T18:31:28,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23b354d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.log.dir/,STOPPED}
2024-11-12T18:31:28,539 WARN [BP-299880525-172.17.0.3-1731436284742 heartbeating to localhost/127.0.0.1:40445 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-12T18:31:28,539 WARN [BP-299880525-172.17.0.3-1731436284742 heartbeating to localhost/127.0.0.1:40445 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-299880525-172.17.0.3-1731436284742 (Datanode Uuid f2b60233-d509-452c-93a8-e2b2eb8bf6e5) service to localhost/127.0.0.1:40445
2024-11-12T18:31:28,539 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-12T18:31:28,539 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T18:31:28,540 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data1/current/BP-299880525-172.17.0.3-1731436284742 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T18:31:28,540 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/cluster_7e76b460-1d57-8118-78ba-b3cfea092276/data/data2/current/BP-299880525-172.17.0.3-1731436284742 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T18:31:28,541 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T18:31:28,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3677c717{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-12T18:31:28,548 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6800152e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T18:31:28,548 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T18:31:28,548 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a1af01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T18:31:28,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@467f22c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a043466-6f48-7378-f6ae-77e87fe81a3a/hadoop.log.dir/,STOPPED}
2024-11-12T18:31:28,556 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-12T18:31:28,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-12T18:31:28,594 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=150 (was 90) - Thread LEAK? -, OpenFileDescriptor=519 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=172 (was 152) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6197 (was 6422)