2024-12-07 06:51:43,307 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-07 06:51:43,324 main DEBUG Took 0.014333 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 06:51:43,324 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 06:51:43,324 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 06:51:43,326 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 06:51:43,327 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,339 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 06:51:43,361 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,363 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,364 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,365 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,365 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,366 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,367 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,367 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,368 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,368 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,369 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,370 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,371 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,371 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 06:51:43,372 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,372 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,373 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,373 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,374 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,374 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,375 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,375 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,376 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,376 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 06:51:43,377 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,377 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 06:51:43,379 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 06:51:43,380 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 06:51:43,382 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 06:51:43,383 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-07 06:51:43,384 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 06:51:43,385 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 06:51:43,395 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 06:51:43,398 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 06:51:43,399 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 06:51:43,400 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 06:51:43,400 main DEBUG createAppenders(={Console}) 2024-12-07 06:51:43,401 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-07 06:51:43,402 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-07 06:51:43,402 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-07 06:51:43,403 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 06:51:43,403 main DEBUG OutputStream closed 2024-12-07 06:51:43,403 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 06:51:43,404 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 06:51:43,404 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-07 06:51:43,487 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 06:51:43,489 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 06:51:43,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 06:51:43,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 06:51:43,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 06:51:43,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 06:51:43,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 06:51:43,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 06:51:43,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 06:51:43,495 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 06:51:43,495 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 06:51:43,496 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 06:51:43,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 06:51:43,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 06:51:43,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 06:51:43,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 06:51:43,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 06:51:43,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 06:51:43,501 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 06:51:43,501 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-07 06:51:43,501 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 06:51:43,502 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-07T06:51:43,520 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-07 06:51:43,524 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 06:51:43,524 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-07T06:51:43,782 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752 2024-12-07T06:51:43,810 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f, deleteOnExit=true 2024-12-07T06:51:43,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/test.cache.data in system properties and HBase conf 2024-12-07T06:51:43,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T06:51:43,813 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir in system properties and HBase conf 2024-12-07T06:51:43,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T06:51:43,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T06:51:43,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T06:51:43,916 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-07T06:51:44,012 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T06:51:44,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T06:51:44,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T06:51:44,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T06:51:44,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T06:51:44,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T06:51:44,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T06:51:44,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T06:51:44,021 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T06:51:44,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T06:51:44,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/nfs.dump.dir in system properties and HBase conf 2024-12-07T06:51:44,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/java.io.tmpdir in system properties and HBase conf 2024-12-07T06:51:44,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T06:51:44,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T06:51:44,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T06:51:44,868 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T06:51:44,959 INFO [Time-limited test {}] log.Log(170): Logging initialized @2345ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T06:51:45,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:45,100 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T06:51:45,122 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T06:51:45,122 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T06:51:45,124 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T06:51:45,137 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:45,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir/,AVAILABLE} 2024-12-07T06:51:45,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T06:51:45,353 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/java.io.tmpdir/jetty-localhost-37165-hadoop-hdfs-3_4_1-tests_jar-_-any-17263509984273484854/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T06:51:45,366 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:37165} 2024-12-07T06:51:45,366 INFO [Time-limited test {}] server.Server(415): Started @2753ms 2024-12-07T06:51:45,761 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:45,768 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T06:51:45,769 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T06:51:45,769 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T06:51:45,769 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T06:51:45,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir/,AVAILABLE} 2024-12-07T06:51:45,771 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T06:51:45,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b97a472{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/java.io.tmpdir/jetty-localhost-40337-hadoop-hdfs-3_4_1-tests_jar-_-any-3628901478490615537/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:45,899 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:40337} 2024-12-07T06:51:45,899 INFO [Time-limited test {}] server.Server(415): Started @3286ms 2024-12-07T06:51:45,954 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T06:51:46,083 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:46,090 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T06:51:46,093 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T06:51:46,094 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T06:51:46,094 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T06:51:46,095 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir/,AVAILABLE} 2024-12-07T06:51:46,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T06:51:46,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@353955e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/java.io.tmpdir/jetty-localhost-45865-hadoop-hdfs-3_4_1-tests_jar-_-any-4729368184342180036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:46,254 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:45865} 2024-12-07T06:51:46,254 INFO [Time-limited test {}] server.Server(415): Started @3641ms 2024-12-07T06:51:46,256 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T06:51:46,295 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:46,301 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T06:51:46,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T06:51:46,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T06:51:46,309 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T06:51:46,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir/,AVAILABLE} 2024-12-07T06:51:46,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T06:51:46,410 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data2/current/BP-264701312-172.17.0.2-1733554304606/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:46,410 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data4/current/BP-264701312-172.17.0.2-1733554304606/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:46,410 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data1/current/BP-264701312-172.17.0.2-1733554304606/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:46,410 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data3/current/BP-264701312-172.17.0.2-1733554304606/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:46,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3114ae69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/java.io.tmpdir/jetty-localhost-36269-hadoop-hdfs-3_4_1-tests_jar-_-any-3505951779555098870/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:46,452 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:36269} 
2024-12-07T06:51:46,453 INFO [Time-limited test {}] server.Server(415): Started @3840ms 2024-12-07T06:51:46,455 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T06:51:46,462 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T06:51:46,463 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T06:51:46,534 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeeadee618d50bb with lease ID 0xd38ae85f3ff1e85c: Processing first storage report for DS-b32510fd-fa70-4702-835e-69b6a83e7c83 from datanode DatanodeRegistration(127.0.0.1:36227, datanodeUuid=7dd2d5a4-4761-49ce-bfb5-d88522fbc13d, infoPort=44567, infoSecurePort=0, ipcPort=43759, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606) 2024-12-07T06:51:46,536 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeeadee618d50bb with lease ID 0xd38ae85f3ff1e85c: from storage DS-b32510fd-fa70-4702-835e-69b6a83e7c83 node DatanodeRegistration(127.0.0.1:36227, datanodeUuid=7dd2d5a4-4761-49ce-bfb5-d88522fbc13d, infoPort=44567, infoSecurePort=0, ipcPort=43759, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-07T06:51:46,537 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x216b90fb55a6b546 with lease ID 0xd38ae85f3ff1e85b: Processing first storage report for DS-7c2a45a5-d1bb-4cae-93ca-5fab1abf19c2 from datanode DatanodeRegistration(127.0.0.1:39505, datanodeUuid=1ee0cd3f-06af-4b67-aa59-7d3751ac7ecb, infoPort=38391, infoSecurePort=0, ipcPort=46587, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606) 2024-12-07T06:51:46,537 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x216b90fb55a6b546 with lease ID 0xd38ae85f3ff1e85b: from storage DS-7c2a45a5-d1bb-4cae-93ca-5fab1abf19c2 node DatanodeRegistration(127.0.0.1:39505, datanodeUuid=1ee0cd3f-06af-4b67-aa59-7d3751ac7ecb, infoPort=38391, infoSecurePort=0, ipcPort=46587, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T06:51:46,537 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x216b90fb55a6b546 with lease ID 0xd38ae85f3ff1e85b: Processing first storage report for DS-b47721cf-5cbd-4b2b-9cdf-eba221bf1c0c from datanode DatanodeRegistration(127.0.0.1:39505, datanodeUuid=1ee0cd3f-06af-4b67-aa59-7d3751ac7ecb, infoPort=38391, infoSecurePort=0, ipcPort=46587, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606) 2024-12-07T06:51:46,538 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x216b90fb55a6b546 with lease ID 0xd38ae85f3ff1e85b: from storage DS-b47721cf-5cbd-4b2b-9cdf-eba221bf1c0c node DatanodeRegistration(127.0.0.1:39505, datanodeUuid=1ee0cd3f-06af-4b67-aa59-7d3751ac7ecb, infoPort=38391, infoSecurePort=0, ipcPort=46587, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, 
invalidatedBlocks: 0 2024-12-07T06:51:46,538 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeeadee618d50bb with lease ID 0xd38ae85f3ff1e85c: Processing first storage report for DS-da239d7d-486c-490e-ab2b-721c7be91b3d from datanode DatanodeRegistration(127.0.0.1:36227, datanodeUuid=7dd2d5a4-4761-49ce-bfb5-d88522fbc13d, infoPort=44567, infoSecurePort=0, ipcPort=43759, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606) 2024-12-07T06:51:46,538 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeeadee618d50bb with lease ID 0xd38ae85f3ff1e85c: from storage DS-da239d7d-486c-490e-ab2b-721c7be91b3d node DatanodeRegistration(127.0.0.1:36227, datanodeUuid=7dd2d5a4-4761-49ce-bfb5-d88522fbc13d, infoPort=44567, infoSecurePort=0, ipcPort=43759, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T06:51:46,590 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data5/current/BP-264701312-172.17.0.2-1733554304606/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:46,590 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data6/current/BP-264701312-172.17.0.2-1733554304606/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:46,619 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T06:51:46,626 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae09056aa93b7911 with lease ID 0xd38ae85f3ff1e85d: Processing first storage report for DS-000fd5f9-b760-44fb-bc96-9498f2fa9a55 from datanode DatanodeRegistration(127.0.0.1:41381, datanodeUuid=4e00dcf7-9789-4453-bcb8-9aa883f42e96, infoPort=38655, infoSecurePort=0, ipcPort=43305, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606) 2024-12-07T06:51:46,626 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae09056aa93b7911 with lease ID 0xd38ae85f3ff1e85d: from storage DS-000fd5f9-b760-44fb-bc96-9498f2fa9a55 node DatanodeRegistration(127.0.0.1:41381, datanodeUuid=4e00dcf7-9789-4453-bcb8-9aa883f42e96, infoPort=38655, infoSecurePort=0, ipcPort=43305, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T06:51:46,626 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae09056aa93b7911 with lease ID 0xd38ae85f3ff1e85d: Processing first storage report for DS-cb615147-bb7c-43a6-84e6-6076b96b628c from datanode DatanodeRegistration(127.0.0.1:41381, datanodeUuid=4e00dcf7-9789-4453-bcb8-9aa883f42e96, infoPort=38655, infoSecurePort=0, ipcPort=43305, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606) 2024-12-07T06:51:46,627 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae09056aa93b7911 with lease ID 0xd38ae85f3ff1e85d: from storage DS-cb615147-bb7c-43a6-84e6-6076b96b628c node DatanodeRegistration(127.0.0.1:41381, datanodeUuid=4e00dcf7-9789-4453-bcb8-9aa883f42e96, infoPort=38655, infoSecurePort=0, ipcPort=43305, storageInfo=lv=-57;cid=testClusterID;nsid=864243984;c=1733554304606), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T06:51:46,832 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752 2024-12-07T06:51:46,906 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-07T06:51:46,960 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=160, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=209, ProcessCount=11, AvailableMemoryMB=6911 2024-12-07T06:51:46,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T06:51:46,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-07T06:51:47,051 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/zookeeper_0, clientPort=56718, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T06:51:47,062 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56718 2024-12-07T06:51:47,084 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:47,087 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:47,197 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:47,198 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:47,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:52296 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:36227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52296 dst: /127.0.0.1:36227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:47,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-07T06:51:47,674 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T06:51:47,683 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313 with version=8 2024-12-07T06:51:47,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/hbase-staging 2024-12-07T06:51:47,778 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T06:51:48,054 INFO [Time-limited test {}] client.ConnectionUtils(128): master/61c02eafbb40:0 server-side Connection retries=45 2024-12-07T06:51:48,064 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,065 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,069 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T06:51:48,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T06:51:48,208 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T06:51:48,272 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T06:51:48,281 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T06:51:48,284 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T06:51:48,312 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 11507 (auto-detected) 2024-12-07T06:51:48,313 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-07T06:51:48,333 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40219 2024-12-07T06:51:48,354 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40219 connecting to ZooKeeper ensemble=127.0.0.1:56718 2024-12-07T06:51:48,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:402190x0, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T06:51:48,389 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40219-0x1018bb16a660000 connected 2024-12-07T06:51:48,419 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:48,423 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:48,436 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:48,441 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313, hbase.cluster.distributed=false 2024-12-07T06:51:48,469 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T06:51:48,475 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40219 2024-12-07T06:51:48,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40219 2024-12-07T06:51:48,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40219 2024-12-07T06:51:48,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40219 2024-12-07T06:51:48,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40219 2024-12-07T06:51:48,596 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/61c02eafbb40:0 server-side Connection retries=45 2024-12-07T06:51:48,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,598 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,598 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T06:51:48,599 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,599 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T06:51:48,602 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T06:51:48,604 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T06:51:48,605 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39579 2024-12-07T06:51:48,607 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39579 connecting to ZooKeeper ensemble=127.0.0.1:56718 2024-12-07T06:51:48,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:48,610 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:48,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395790x0, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T06:51:48,618 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39579-0x1018bb16a660001 connected 2024-12-07T06:51:48,618 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:48,623 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T06:51:48,631 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T06:51:48,633 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T06:51:48,638 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T06:51:48,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39579 2024-12-07T06:51:48,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=39579 2024-12-07T06:51:48,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39579 2024-12-07T06:51:48,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39579 2024-12-07T06:51:48,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39579 2024-12-07T06:51:48,661 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/61c02eafbb40:0 server-side Connection retries=45 2024-12-07T06:51:48,661 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,662 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,662 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T06:51:48,662 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,662 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T06:51:48,663 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T06:51:48,663 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T06:51:48,664 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40491 2024-12-07T06:51:48,665 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40491 connecting to ZooKeeper ensemble=127.0.0.1:56718 2024-12-07T06:51:48,666 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:48,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:48,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404910x0, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T06:51:48,674 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:48,675 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40491-0x1018bb16a660002 connected 2024-12-07T06:51:48,675 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-07T06:51:48,676 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T06:51:48,677 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T06:51:48,679 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T06:51:48,680 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40491 2024-12-07T06:51:48,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40491 2024-12-07T06:51:48,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40491 2024-12-07T06:51:48,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40491 2024-12-07T06:51:48,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40491 2024-12-07T06:51:48,699 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/61c02eafbb40:0 server-side Connection retries=45 2024-12-07T06:51:48,699 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,699 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,699 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T06:51:48,699 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:48,699 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T06:51:48,699 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T06:51:48,700 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T06:51:48,701 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38361 2024-12-07T06:51:48,702 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38361 connecting to ZooKeeper ensemble=127.0.0.1:56718 2024-12-07T06:51:48,703 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:48,705 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:48,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:383610x0, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T06:51:48,710 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38361-0x1018bb16a660003 connected 2024-12-07T06:51:48,710 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:48,711 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T06:51:48,712 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T06:51:48,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T06:51:48,715 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T06:51:48,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38361 2024-12-07T06:51:48,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38361 2024-12-07T06:51:48,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38361 2024-12-07T06:51:48,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38361 2024-12-07T06:51:48,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38361 2024-12-07T06:51:48,732 DEBUG [M:0;61c02eafbb40:40219 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;61c02eafbb40:40219 2024-12-07T06:51:48,733 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/61c02eafbb40,40219,1733554307838 2024-12-07T06:51:48,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:48,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:48,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-07T06:51:48,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:48,743 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/61c02eafbb40,40219,1733554307838 2024-12-07T06:51:48,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T06:51:48,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T06:51:48,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:48,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T06:51:48,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:48,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:48,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:48,767 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T06:51:48,768 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/61c02eafbb40,40219,1733554307838 from backup master directory 2024-12-07T06:51:48,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/61c02eafbb40,40219,1733554307838 2024-12-07T06:51:48,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:48,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-07T06:51:48,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:48,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:48,772 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T06:51:48,772 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=61c02eafbb40,40219,1733554307838 2024-12-07T06:51:48,775 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T06:51:48,776 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T06:51:48,840 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/hbase.id] with ID: d0774f96-b726-4663-868e-6ff272f6b37e 2024-12-07T06:51:48,840 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/.tmp/hbase.id 2024-12-07T06:51:48,847 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:48,847 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:48,853 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:52312 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:36227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52312 dst: /127.0.0.1:36227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:48,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-07T06:51:48,860 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T06:51:48,860 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/.tmp/hbase.id]:[hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/hbase.id] 2024-12-07T06:51:48,907 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:48,912 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T06:51:48,933 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-12-07T06:51:48,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:48,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:48,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:48,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:48,950 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:48,950 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:48,953 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:52334 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:36227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52334 dst: /127.0.0.1:36227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:48,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-07T06:51:48,959 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T06:51:48,975 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T06:51:48,977 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T06:51:48,983 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T06:51:49,011 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:49,012 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:49,015 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:52358 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:36227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52358 dst: /127.0.0.1:36227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:49,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-07T06:51:49,022 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T06:51:49,041 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store 2024-12-07T06:51:49,058 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:49,059 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:49,062 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:39062 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39062 dst: /127.0.0.1:39505 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:49,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-07T06:51:49,067 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T06:51:49,071 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-07T06:51:49,074 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:49,076 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T06:51:49,076 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:49,076 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:49,078 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-07T06:51:49,078 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:49,078 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:49,079 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733554309076Disabling compacts and flushes for region at 1733554309076Disabling writes for close at 1733554309078 (+2 ms)Writing region close event to WAL at 1733554309078Closed at 1733554309078 2024-12-07T06:51:49,081 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/.initializing 2024-12-07T06:51:49,081 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/WALs/61c02eafbb40,40219,1733554307838 2024-12-07T06:51:49,089 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T06:51:49,102 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C40219%2C1733554307838, suffix=, logDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/WALs/61c02eafbb40,40219,1733554307838, archiveDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/oldWALs, maxLogs=10 2024-12-07T06:51:49,131 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/WALs/61c02eafbb40,40219,1733554307838/61c02eafbb40%2C40219%2C1733554307838.1733554309106, exclude list is [], retry=0 2024-12-07T06:51:49,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:49,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-b32510fd-fa70-4702-835e-69b6a83e7c83,DISK] 2024-12-07T06:51:49,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39505,DS-7c2a45a5-d1bb-4cae-93ca-5fab1abf19c2,DISK] 2024-12-07T06:51:49,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41381,DS-000fd5f9-b760-44fb-bc96-9498f2fa9a55,DISK] 2024-12-07T06:51:49,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-07T06:51:49,193 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/WALs/61c02eafbb40,40219,1733554307838/61c02eafbb40%2C40219%2C1733554307838.1733554309106 2024-12-07T06:51:49,193 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38391:38391),(127.0.0.1/127.0.0.1:44567:44567),(127.0.0.1/127.0.0.1:38655:38655)] 2024-12-07T06:51:49,194 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T06:51:49,194 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:49,197 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,198 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,236 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T06:51:49,265 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:49,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:49,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,271 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T06:51:49,272 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:49,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T06:51:49,273 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T06:51:49,275 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:49,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T06:51:49,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T06:51:49,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:49,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T06:51:49,280 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,284 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,285 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,290 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,290 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,294 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T06:51:49,296 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:49,302 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T06:51:49,303 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75273054, jitterRate=0.1216559112071991}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T06:51:49,309 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733554309211Initializing all the Stores at 1733554309213 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554309214 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554309214Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554309215 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554309215Cleaning up temporary data from old regions at 1733554309290 (+75 ms)Region opened successfully at 1733554309309 (+19 ms) 2024-12-07T06:51:49,310 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T06:51:49,344 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cbe8edf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=61c02eafbb40/172.17.0.2:0 2024-12-07T06:51:49,380 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T06:51:49,391 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T06:51:49,391 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T06:51:49,394 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T06:51:49,395 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T06:51:49,401 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-07T06:51:49,401 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T06:51:49,431 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T06:51:49,442 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T06:51:49,444 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T06:51:49,447 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T06:51:49,449 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T06:51:49,452 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T06:51:49,454 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T06:51:49,457 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T06:51:49,459 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T06:51:49,460 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T06:51:49,461 DEBUG [master/61c02eafbb40:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T06:51:49,478 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T06:51:49,479 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T06:51:49,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:49,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:49,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:49,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:49,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,488 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=61c02eafbb40,40219,1733554307838, sessionid=0x1018bb16a660000, setting cluster-up flag (Was=false) 2024-12-07T06:51:49,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-07T06:51:49,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,509 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T06:51:49,511 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=61c02eafbb40,40219,1733554307838 2024-12-07T06:51:49,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:49,524 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T06:51:49,525 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=61c02eafbb40,40219,1733554307838 2024-12-07T06:51:49,531 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T06:51:49,605 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T06:51:49,614 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T06:51:49,621 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-07T06:51:49,621 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(746): ClusterId : d0774f96-b726-4663-868e-6ff272f6b37e 2024-12-07T06:51:49,621 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(746): ClusterId : d0774f96-b726-4663-868e-6ff272f6b37e 2024-12-07T06:51:49,622 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(746): ClusterId : d0774f96-b726-4663-868e-6ff272f6b37e 2024-12-07T06:51:49,624 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T06:51:49,624 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T06:51:49,624 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T06:51:49,630 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T06:51:49,630 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T06:51:49,631 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T06:51:49,631 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T06:51:49,631 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T06:51:49,631 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T06:51:49,628 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 61c02eafbb40,40219,1733554307838 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T06:51:49,636 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T06:51:49,637 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T06:51:49,637 DEBUG [RS:2;61c02eafbb40:38361 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a6b71ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=61c02eafbb40/172.17.0.2:0 2024-12-07T06:51:49,637 DEBUG [RS:0;61c02eafbb40:39579 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20192716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=61c02eafbb40/172.17.0.2:0 2024-12-07T06:51:49,638 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/61c02eafbb40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T06:51:49,638 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 
2024-12-07T06:51:49,638 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/61c02eafbb40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T06:51:49,639 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/61c02eafbb40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T06:51:49,639 DEBUG [RS:1;61c02eafbb40:40491 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70ce93c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=61c02eafbb40/172.17.0.2:0 2024-12-07T06:51:49,639 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/61c02eafbb40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T06:51:49,639 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/61c02eafbb40:0, corePoolSize=10, maxPoolSize=10 2024-12-07T06:51:49,639 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,639 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/61c02eafbb40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T06:51:49,640 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,653 DEBUG [RS:2;61c02eafbb40:38361 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;61c02eafbb40:38361 2024-12-07T06:51:49,657 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T06:51:49,657 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T06:51:49,657 DEBUG [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T06:51:49,658 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T06:51:49,659 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T06:51:49,660 DEBUG [RS:1;61c02eafbb40:40491 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;61c02eafbb40:40491 2024-12-07T06:51:49,661 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(2659): reportForDuty to master=61c02eafbb40,40219,1733554307838 with port=38361, startcode=1733554308698 2024-12-07T06:51:49,661 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T06:51:49,661 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T06:51:49,661 DEBUG [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T06:51:49,662 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(2659): reportForDuty to master=61c02eafbb40,40219,1733554307838 with port=40491, startcode=1733554308661 2024-12-07T06:51:49,665 DEBUG [RS:0;61c02eafbb40:39579 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;61c02eafbb40:39579 2024-12-07T06:51:49,665 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T06:51:49,665 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T06:51:49,665 DEBUG [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T06:51:49,666 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(2659): reportForDuty to master=61c02eafbb40,40219,1733554307838 with port=39579, startcode=1733554308555 2024-12-07T06:51:49,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-07T06:51:49,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-07T06:51:49,669 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:49,669 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733554339669 2024-12-07T06:51:49,669 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T06:51:49,671 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 
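The hbase:meta descriptor printed above is assembled internally by FSTableDescriptors, but the same column-family attributes shown for the 'info' family (VERSIONS=3, ROWCOL bloom filter, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks) can be expressed for an ordinary table with the public builder API. A sketch under that assumption; the table name "demo" is illustrative, and this is not how the meta descriptor itself is produced.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static TableDescriptor build() {
            // Column family with the attributes the log shows for hbase:meta's 'info' family.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(info)
                .build();
        }
    }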
2024-12-07T06:51:49,672 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T06:51:49,675 DEBUG [RS:0;61c02eafbb40:39579 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T06:51:49,675 DEBUG [RS:2;61c02eafbb40:38361 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T06:51:49,675 DEBUG [RS:1;61c02eafbb40:40491 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T06:51:49,676 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T06:51:49,677 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T06:51:49,677 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T06:51:49,677 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T06:51:49,681 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,683 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:49,683 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
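The DFSStripedOutputStream warnings above indicate that the RS-3-2-1024k erasure coding policy wants more datanodes/racks than this small test cluster provides, hence the pointer to 'hdfs ec -verifyClusterSetup'. A hedged Java sketch of the equivalent programmatic check; the NameNode address and path are copied from the log, and the DistributedFileSystem EC calls are assumed to match the Hadoop 3.4 client shown in the stack traces.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheckSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode address copied from the log entries above.
            DistributedFileSystem dfs =
                (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:40093"), conf);
            Path dir = new Path("/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313");
            // Null means no EC policy is set and the directory uses plain replication.
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            System.out.println("EC policy on " + dir + ": " + policy);
            // If a striped policy such as RS-3-2-1024k is set on a cluster that is too small,
            // removing it avoids the "Cannot allocate parity block" warnings:
            // dfs.unsetErasureCodingPolicy(dir);
        }
    }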
2024-12-07T06:51:49,686 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T06:51:49,687 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T06:51:49,688 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T06:51:49,696 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T06:51:49,697 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T06:51:49,708 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.large.0-1733554309698,5,FailOnTimeoutGroup] 2024-12-07T06:51:49,713 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:52630 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:41381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52630 dst: /127.0.0.1:41381 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:49,716 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.small.0-1733554309709,5,FailOnTimeoutGroup] 2024-12-07T06:51:49,716 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,717 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
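The HMaster line above names the exact property to set if the high-storeFileRefCount recovery chore is wanted. A minimal sketch, assuming the value is supplied through the normal HBase Configuration (in a deployment it would live in hbase-site.xml); the threshold 3 is an arbitrary example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountRecoverySketch {
        public static Configuration configure() {
            Configuration conf = HBaseConfiguration.create();
            // Property name copied verbatim from the master log entry above;
            // any value > 0 enables the reopen-regions chore.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
            return conf;
        }
    }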
2024-12-07T06:51:49,718 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,719 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-07T06:51:49,733 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T06:51:49,734 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T06:51:49,735 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313 2024-12-07T06:51:49,735 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49869, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T06:51:49,735 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32981, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T06:51:49,735 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43455, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T06:51:49,742 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40219 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 61c02eafbb40,40491,1733554308661 2024-12-07T06:51:49,745 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40219 {}] master.ServerManager(517): Registering regionserver=61c02eafbb40,40491,1733554308661 2024-12-07T06:51:49,749 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:49,749 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:49,756 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40219 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 61c02eafbb40,38361,1733554308698 2024-12-07T06:51:49,756 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40219 {}] master.ServerManager(517): Registering regionserver=61c02eafbb40,38361,1733554308698 2024-12-07T06:51:49,757 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:52406 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:36227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52406 dst: /127.0.0.1:36227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T06:51:49,761 DEBUG [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313 2024-12-07T06:51:49,761 DEBUG [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40093 2024-12-07T06:51:49,761 DEBUG [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T06:51:49,762 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40219 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 61c02eafbb40,39579,1733554308555 2024-12-07T06:51:49,762 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40219 {}] master.ServerManager(517): Registering regionserver=61c02eafbb40,39579,1733554308555 2024-12-07T06:51:49,762 DEBUG [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313 2024-12-07T06:51:49,762 DEBUG [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40093 2024-12-07T06:51:49,762 DEBUG [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T06:51:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-07T06:51:49,767 DEBUG [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313 2024-12-07T06:51:49,767 DEBUG [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40093 2024-12-07T06:51:49,767 DEBUG [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T06:51:49,769 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T06:51:49,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:49,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T06:51:49,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T06:51:49,774 DEBUG [RS:2;61c02eafbb40:38361 {}] zookeeper.ZKUtil(111): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/61c02eafbb40,38361,1733554308698 2024-12-07T06:51:49,774 DEBUG [RS:1;61c02eafbb40:40491 {}] zookeeper.ZKUtil(111): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/61c02eafbb40,40491,1733554308661 2024-12-07T06:51:49,774 WARN [RS:1;61c02eafbb40:40491 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T06:51:49,774 WARN [RS:2;61c02eafbb40:38361 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T06:51:49,774 INFO [RS:2;61c02eafbb40:38361 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T06:51:49,774 INFO [RS:1;61c02eafbb40:40491 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T06:51:49,775 DEBUG [RS:0;61c02eafbb40:39579 {}] zookeeper.ZKUtil(111): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/61c02eafbb40,39579,1733554308555 2024-12-07T06:51:49,775 DEBUG [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,38361,1733554308698 2024-12-07T06:51:49,775 DEBUG [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,40491,1733554308661 2024-12-07T06:51:49,775 WARN [RS:0;61c02eafbb40:39579 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T06:51:49,775 INFO [RS:0;61c02eafbb40:39579 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T06:51:49,775 DEBUG [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,39579,1733554308555 2024-12-07T06:51:49,777 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T06:51:49,777 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:49,777 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [61c02eafbb40,39579,1733554308555] 2024-12-07T06:51:49,778 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [61c02eafbb40,40491,1733554308661] 2024-12-07T06:51:49,778 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [61c02eafbb40,38361,1733554308698] 2024-12-07T06:51:49,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:49,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T06:51:49,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T06:51:49,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:49,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:49,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T06:51:49,785 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T06:51:49,785 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:49,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:49,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T06:51:49,789 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T06:51:49,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:49,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:49,790 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T06:51:49,792 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740 2024-12-07T06:51:49,798 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740 2024-12-07T06:51:49,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T06:51:49,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T06:51:49,803 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T06:51:49,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T06:51:49,808 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T06:51:49,808 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T06:51:49,808 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T06:51:49,815 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T06:51:49,817 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74206886, jitterRate=0.10576876997947693}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T06:51:49,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733554309770Initializing all the Stores at 1733554309772 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554309773 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554309773Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554309773Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS 
=> '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554309773Cleaning up temporary data from old regions at 1733554309802 (+29 ms)Region opened successfully at 1733554309820 (+18 ms) 2024-12-07T06:51:49,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T06:51:49,821 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T06:51:49,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T06:51:49,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T06:51:49,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T06:51:49,823 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T06:51:49,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733554309821Disabling compacts and flushes for region at 1733554309821Disabling writes for close at 1733554309821Writing region close event to WAL at 1733554309822 (+1 ms)Closed at 1733554309823 (+1 ms) 2024-12-07T06:51:49,826 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T06:51:49,826 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T06:51:49,826 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T06:51:49,827 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T06:51:49,828 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T06:51:49,833 INFO [RS:2;61c02eafbb40:38361 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T06:51:49,833 INFO [RS:1;61c02eafbb40:40491 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T06:51:49,833 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,833 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,834 INFO [RS:0;61c02eafbb40:39579 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T06:51:49,834 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T06:51:49,834 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T06:51:49,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T06:51:49,836 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T06:51:49,836 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T06:51:49,842 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T06:51:49,842 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T06:51:49,842 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T06:51:49,844 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,844 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,844 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,844 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,844 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service 
name=RS_CLOSE_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/61c02eafbb40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T06:51:49,845 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/61c02eafbb40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T06:51:49,845 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,845 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/61c02eafbb40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T06:51:49,846 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:2;61c02eafbb40:38361 {}] 
executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,846 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,847 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,847 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:49,847 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:49,847 DEBUG [RS:2;61c02eafbb40:38361 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:49,847 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:49,847 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:49,847 DEBUG [RS:1;61c02eafbb40:40491 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:49,847 DEBUG [RS:0;61c02eafbb40:39579 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:49,848 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T06:51:49,849 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T06:51:49,849 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,849 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,849 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,850 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,850 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,38361,1733554308698-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T06:51:49,852 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,852 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,852 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,853 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,853 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,853 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,39579,1733554308555-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T06:51:49,853 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T06:51:49,857 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,857 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,857 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,857 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,857 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,857 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40491,1733554308661-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
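Every "Chore ScheduledChore name=... is enabled." entry above and below comes from the same ChoreService/ScheduledChore mechanism. A hedged sketch of that pattern; the chore name, period, and Stoppable stub are illustrative and unrelated to this test.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) {
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            // Periodic task, same shape as CompactionChecker, MemstoreFlusherChore, etc. above.
            ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
                @Override protected void chore() {
                    System.out.println("DemoChore tick");
                }
            };
            ChoreService service = new ChoreService("demo");
            // Scheduling is what produces the "Chore ScheduledChore name=... is enabled." log lines.
            service.scheduleChore(chore);
            // ... later: service.shutdown();
        }
    }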
2024-12-07T06:51:49,875 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T06:51:49,877 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T06:51:49,877 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,38361,1733554308698-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,878 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,39579,1733554308555-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,878 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,878 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,878 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.Replication(171): 61c02eafbb40,39579,1733554308555 started 2024-12-07T06:51:49,878 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.Replication(171): 61c02eafbb40,38361,1733554308698 started 2024-12-07T06:51:49,880 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T06:51:49,880 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40491,1733554308661-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,880 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,880 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.Replication(171): 61c02eafbb40,40491,1733554308661 started 2024-12-07T06:51:49,898 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T06:51:49,898 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1482): Serving as 61c02eafbb40,38361,1733554308698, RpcServer on 61c02eafbb40/172.17.0.2:38361, sessionid=0x1018bb16a660003 2024-12-07T06:51:49,899 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T06:51:49,899 DEBUG [RS:2;61c02eafbb40:38361 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 61c02eafbb40,38361,1733554308698 2024-12-07T06:51:49,899 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,38361,1733554308698' 2024-12-07T06:51:49,900 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T06:51:49,901 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T06:51:49,901 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T06:51:49,901 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T06:51:49,902 DEBUG [RS:2;61c02eafbb40:38361 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 61c02eafbb40,38361,1733554308698 2024-12-07T06:51:49,902 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,38361,1733554308698' 2024-12-07T06:51:49,902 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T06:51:49,902 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T06:51:49,903 DEBUG [RS:2;61c02eafbb40:38361 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T06:51:49,903 INFO [RS:2;61c02eafbb40:38361 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T06:51:49,903 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,903 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:49,903 INFO [RS:2;61c02eafbb40:38361 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
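[Annotation] The flush-table-proc and online-snapshot managers above register each region server as a procedure member in ZooKeeper and then watch the '/hbase/<procedure>/acquired' and '/hbase/<procedure>/abort' znodes for new or aborted procedures. As a rough illustration only (not HBase's ZKProcedureMemberRpcs code), the same znodes can be listed with the plain ZooKeeper client against the quorum used in this run (127.0.0.1:56718, specific to this test):

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListProcedureZNodes {
      public static void main(String[] args) throws Exception {
        // Connect to the test quorum; a no-op watcher is enough for a one-shot read.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56718", 30_000, event -> { });
        // Children of "acquired" are procedures offered to members; children of
        // "abort" are procedures that have been aborted.
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", false);
        List<String> aborted  = zk.getChildren("/hbase/flush-table-proc/abort", false);
        System.out.println("acquired=" + acquired + " aborted=" + aborted);
        zk.close();
      }
    }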
2024-12-07T06:51:49,903 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(1482): Serving as 61c02eafbb40,39579,1733554308555, RpcServer on 61c02eafbb40/172.17.0.2:39579, sessionid=0x1018bb16a660001 2024-12-07T06:51:49,903 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(1482): Serving as 61c02eafbb40,40491,1733554308661, RpcServer on 61c02eafbb40/172.17.0.2:40491, sessionid=0x1018bb16a660002 2024-12-07T06:51:49,904 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T06:51:49,904 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T06:51:49,904 DEBUG [RS:0;61c02eafbb40:39579 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 61c02eafbb40,39579,1733554308555 2024-12-07T06:51:49,904 DEBUG [RS:1;61c02eafbb40:40491 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 61c02eafbb40,40491,1733554308661 2024-12-07T06:51:49,904 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,39579,1733554308555' 2024-12-07T06:51:49,904 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,40491,1733554308661' 2024-12-07T06:51:49,904 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T06:51:49,904 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T06:51:49,905 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T06:51:49,905 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T06:51:49,905 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T06:51:49,905 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T06:51:49,905 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T06:51:49,905 DEBUG [RS:0;61c02eafbb40:39579 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 61c02eafbb40,39579,1733554308555 2024-12-07T06:51:49,905 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T06:51:49,905 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,39579,1733554308555' 2024-12-07T06:51:49,906 DEBUG [RS:1;61c02eafbb40:40491 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 61c02eafbb40,40491,1733554308661 2024-12-07T06:51:49,906 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T06:51:49,906 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,40491,1733554308661' 2024-12-07T06:51:49,906 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T06:51:49,906 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T06:51:49,906 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T06:51:49,907 DEBUG [RS:0;61c02eafbb40:39579 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T06:51:49,907 DEBUG [RS:1;61c02eafbb40:40491 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T06:51:49,907 INFO [RS:0;61c02eafbb40:39579 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T06:51:49,907 INFO [RS:0;61c02eafbb40:39579 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T06:51:49,907 INFO [RS:1;61c02eafbb40:40491 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T06:51:49,907 INFO [RS:1;61c02eafbb40:40491 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T06:51:50,004 WARN [61c02eafbb40:40219 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T06:51:50,009 INFO [RS:0;61c02eafbb40:39579 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T06:51:50,009 INFO [RS:2;61c02eafbb40:38361 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T06:51:50,009 INFO [RS:1;61c02eafbb40:40491 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T06:51:50,012 INFO [RS:1;61c02eafbb40:40491 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C40491%2C1733554308661, suffix=, logDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,40491,1733554308661, archiveDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/oldWALs, maxLogs=32 2024-12-07T06:51:50,012 INFO [RS:2;61c02eafbb40:38361 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C38361%2C1733554308698, suffix=, logDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,38361,1733554308698, archiveDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/oldWALs, maxLogs=32 2024-12-07T06:51:50,012 INFO [RS:0;61c02eafbb40:39579 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C39579%2C1733554308555, suffix=, logDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,39579,1733554308555, archiveDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/oldWALs, maxLogs=32 2024-12-07T06:51:50,032 DEBUG [RS:0;61c02eafbb40:39579 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,39579,1733554308555/61c02eafbb40%2C39579%2C1733554308555.1733554310017, exclude list is [], retry=0 2024-12-07T06:51:50,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client 
skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39505,DS-7c2a45a5-d1bb-4cae-93ca-5fab1abf19c2,DISK] 2024-12-07T06:51:50,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41381,DS-000fd5f9-b760-44fb-bc96-9498f2fa9a55,DISK] 2024-12-07T06:51:50,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-b32510fd-fa70-4702-835e-69b6a83e7c83,DISK] 2024-12-07T06:51:50,039 DEBUG [RS:2;61c02eafbb40:38361 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,38361,1733554308698/61c02eafbb40%2C38361%2C1733554308698.1733554310017, exclude list is [], retry=0 2024-12-07T06:51:50,039 DEBUG [RS:1;61c02eafbb40:40491 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,40491,1733554308661/61c02eafbb40%2C40491%2C1733554308661.1733554310017, exclude list is [], retry=0 2024-12-07T06:51:50,049 INFO [RS:0;61c02eafbb40:39579 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,39579,1733554308555/61c02eafbb40%2C39579%2C1733554308555.1733554310017 2024-12-07T06:51:50,050 DEBUG [RS:0;61c02eafbb40:39579 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38391:38391),(127.0.0.1/127.0.0.1:44567:44567),(127.0.0.1/127.0.0.1:38655:38655)] 2024-12-07T06:51:50,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41381,DS-000fd5f9-b760-44fb-bc96-9498f2fa9a55,DISK] 2024-12-07T06:51:50,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-b32510fd-fa70-4702-835e-69b6a83e7c83,DISK] 2024-12-07T06:51:50,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39505,DS-7c2a45a5-d1bb-4cae-93ca-5fab1abf19c2,DISK] 2024-12-07T06:51:50,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-b32510fd-fa70-4702-835e-69b6a83e7c83,DISK] 2024-12-07T06:51:50,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:39505,DS-7c2a45a5-d1bb-4cae-93ca-5fab1abf19c2,DISK] 2024-12-07T06:51:50,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41381,DS-000fd5f9-b760-44fb-bc96-9498f2fa9a55,DISK] 2024-12-07T06:51:50,077 INFO [RS:2;61c02eafbb40:38361 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,38361,1733554308698/61c02eafbb40%2C38361%2C1733554308698.1733554310017 2024-12-07T06:51:50,077 INFO [RS:1;61c02eafbb40:40491 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,40491,1733554308661/61c02eafbb40%2C40491%2C1733554308661.1733554310017 2024-12-07T06:51:50,078 DEBUG [RS:2;61c02eafbb40:38361 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38655:38655),(127.0.0.1/127.0.0.1:44567:44567),(127.0.0.1/127.0.0.1:38391:38391)] 2024-12-07T06:51:50,079 DEBUG [RS:1;61c02eafbb40:40491 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38655:38655),(127.0.0.1/127.0.0.1:38391:38391),(127.0.0.1/127.0.0.1:44567:44567)] 2024-12-07T06:51:50,256 DEBUG [61c02eafbb40:40219 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T06:51:50,265 DEBUG [61c02eafbb40:40219 {}] balancer.BalancerClusterState(204): Hosts are {61c02eafbb40=0} racks are {/default-rack=0} 2024-12-07T06:51:50,271 DEBUG [61c02eafbb40:40219 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T06:51:50,272 DEBUG [61c02eafbb40:40219 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T06:51:50,272 DEBUG [61c02eafbb40:40219 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T06:51:50,272 DEBUG [61c02eafbb40:40219 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T06:51:50,272 DEBUG [61c02eafbb40:40219 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T06:51:50,272 DEBUG [61c02eafbb40:40219 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T06:51:50,272 INFO [61c02eafbb40:40219 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T06:51:50,272 INFO [61c02eafbb40:40219 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T06:51:50,272 INFO [61c02eafbb40:40219 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T06:51:50,272 DEBUG [61c02eafbb40:40219 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T06:51:50,279 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=61c02eafbb40,38361,1733554308698 2024-12-07T06:51:50,286 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 61c02eafbb40,38361,1733554308698, state=OPENING 2024-12-07T06:51:50,291 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T06:51:50,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-12-07T06:51:50,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:50,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:50,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:50,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:50,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:50,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:50,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:50,295 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T06:51:50,297 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=61c02eafbb40,38361,1733554308698}] 2024-12-07T06:51:50,473 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T06:51:50,475 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35227, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T06:51:50,487 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T06:51:50,488 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T06:51:50,488 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-07T06:51:50,492 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C38361%2C1733554308698.meta, suffix=.meta, logDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,38361,1733554308698, archiveDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/oldWALs, maxLogs=32 2024-12-07T06:51:50,510 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): 
When create output stream for /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,38361,1733554308698/61c02eafbb40%2C38361%2C1733554308698.meta.1733554310495.meta, exclude list is [], retry=0 2024-12-07T06:51:50,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-b32510fd-fa70-4702-835e-69b6a83e7c83,DISK] 2024-12-07T06:51:50,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41381,DS-000fd5f9-b760-44fb-bc96-9498f2fa9a55,DISK] 2024-12-07T06:51:50,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39505,DS-7c2a45a5-d1bb-4cae-93ca-5fab1abf19c2,DISK] 2024-12-07T06:51:50,517 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/WALs/61c02eafbb40,38361,1733554308698/61c02eafbb40%2C38361%2C1733554308698.meta.1733554310495.meta 2024-12-07T06:51:50,517 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38655:38655),(127.0.0.1/127.0.0.1:44567:44567),(127.0.0.1/127.0.0.1:38391:38391)] 2024-12-07T06:51:50,518 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T06:51:50,519 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T06:51:50,522 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T06:51:50,527 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-07T06:51:50,531 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T06:51:50,532 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:50,532 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T06:51:50,532 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T06:51:50,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T06:51:50,537 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T06:51:50,537 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:50,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:50,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T06:51:50,540 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T06:51:50,540 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:50,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:50,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T06:51:50,542 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T06:51:50,542 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:50,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:50,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T06:51:50,544 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T06:51:50,544 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:50,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
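[Annotation] The CompactionConfiguration lines above are the effective defaults echoed once per column family of hbase:meta (minCompactSize 128 MB, 3-10 files per minor compaction, ratio 1.2, off-peak ratio 5.0, weekly major compactions with 0.5 jitter, ExploringCompactionPolicy). If a test or deployment needed different values, they would normally be set on the Configuration before the cluster starts; the property names below are the commonly used keys for these settings and should be checked against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum / maximum number of store files considered per minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Selection ratio used by the exploring compaction policy (1.2 in the log).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Major compaction period (ms) and jitter, matching 604800000 / 0.5 above.
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }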
2024-12-07T06:51:50,545 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T06:51:50,546 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740 2024-12-07T06:51:50,549 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740 2024-12-07T06:51:50,552 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T06:51:50,552 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T06:51:50,552 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T06:51:50,555 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T06:51:50,556 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70866561, jitterRate=0.055994048714637756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T06:51:50,557 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T06:51:50,558 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733554310533Writing region info on filesystem at 1733554310533Initializing all the Stores at 1733554310535 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554310535Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554310535Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554310535Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554310535Cleaning up temporary data from old regions at 1733554310552 (+17 ms)Running coprocessor post-open hooks at 1733554310557 (+5 ms)Region opened successfully at 1733554310558 (+1 ms) 2024-12-07T06:51:50,566 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733554310464 2024-12-07T06:51:50,577 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T06:51:50,577 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T06:51:50,579 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=61c02eafbb40,38361,1733554308698 2024-12-07T06:51:50,582 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 61c02eafbb40,38361,1733554308698, state=OPEN 2024-12-07T06:51:50,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T06:51:50,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T06:51:50,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T06:51:50,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:50,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T06:51:50,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:50,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:50,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:50,585 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=61c02eafbb40,38361,1733554308698 2024-12-07T06:51:50,589 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T06:51:50,589 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=61c02eafbb40,38361,1733554308698 in 288 msec 2024-12-07T06:51:50,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T06:51:50,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 755 msec 2024-12-07T06:51:50,598 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T06:51:50,598 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T06:51:50,618 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T06:51:50,619 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=61c02eafbb40,38361,1733554308698, seqNum=-1] 2024-12-07T06:51:50,640 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T06:51:50,642 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55881, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T06:51:50,661 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1020 sec 2024-12-07T06:51:50,661 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733554310661, completionTime=-1 2024-12-07T06:51:50,664 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T06:51:50,664 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-07T06:51:50,695 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T06:51:50,695 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733554370695 2024-12-07T06:51:50,695 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733554430695 2024-12-07T06:51:50,695 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 30 msec 2024-12-07T06:51:50,697 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T06:51:50,706 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40219,1733554307838-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:50,706 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40219,1733554307838-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:50,706 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40219,1733554307838-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:50,708 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-61c02eafbb40:40219, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:50,709 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:50,710 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:50,715 DEBUG [master/61c02eafbb40:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T06:51:50,738 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.966sec 2024-12-07T06:51:50,740 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T06:51:50,741 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T06:51:50,742 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T06:51:50,743 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T06:51:50,743 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T06:51:50,744 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40219,1733554307838-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T06:51:50,744 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40219,1733554307838-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T06:51:50,749 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T06:51:50,749 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T06:51:50,750 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40219,1733554307838-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:50,831 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a500405, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T06:51:50,836 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T06:51:50,836 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T06:51:50,840 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 61c02eafbb40,40219,-1 for getting cluster id 2024-12-07T06:51:50,843 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T06:51:50,852 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd0774f96-b726-4663-868e-6ff272f6b37e' 2024-12-07T06:51:50,854 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T06:51:50,855 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d0774f96-b726-4663-868e-6ff272f6b37e" 2024-12-07T06:51:50,857 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11b638c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T06:51:50,857 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [61c02eafbb40,40219,-1] 2024-12-07T06:51:50,860 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T06:51:50,862 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:50,863 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
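[Annotation] The entries above and below trace a fresh client connection: the client asks the master's ConnectionRegistryService for the cluster id, caches the master address, then resolves the hbase:meta location before issuing ClientService calls. From application or test code this whole sequence is hidden behind the standard connection factory; a minimal sketch, assuming a Configuration (hbase-site.xml or the mini-cluster's test configuration) that points at this quorum:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ConnectSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Opening the connection triggers the registry lookup and meta
          // location fetch that appear as DEBUG entries in this log.
          System.out.println("connected, meta table = " + meta.getName());
        }
      }
    }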
2024-12-07T06:51:50,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5872d287, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T06:51:50,866 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T06:51:50,873 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=61c02eafbb40,38361,1733554308698, seqNum=-1] 2024-12-07T06:51:50,873 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T06:51:50,876 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59052, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T06:51:50,922 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=61c02eafbb40,40219,1733554307838 2024-12-07T06:51:50,927 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T06:51:50,931 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 61c02eafbb40,40219,1733554307838 2024-12-07T06:51:50,934 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2e486041 2024-12-07T06:51:50,935 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T06:51:50,937 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52866, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T06:51:50,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T06:51:50,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-07T06:51:50,956 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T06:51:50,959 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-07T06:51:50,960 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:50,962 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T06:51:50,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T06:51:50,972 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:50,972 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:50,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:47162 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:41381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47162 dst: /127.0.0.1:41381 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:50,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-07T06:51:50,986 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T06:51:50,989 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4be06957107ef2f83db89117938f7393, NAME => 'TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313 2024-12-07T06:51:51,005 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:51,005 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:51,015 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:47174 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47174 dst: /127.0.0.1:41381 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:51,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-07T06:51:51,024 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
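[Annotation] The warnings above are the point of this test (TestHBaseWalOnEC): the write path is using the RS-3-2-1024k erasure coding policy, which stripes each block group into 3 data and 2 parity blocks and therefore needs at least five datanodes, while this mini cluster runs only three. The striped writer cannot place parity blocks 3 and 4, the corresponding DataXceiver streams are torn down (hence the "Premature EOF from inputStream" errors), and the block group is written without full parity ("at high risk of losing data"). The log itself suggests 'hdfs ec -verifyClusterSetup' for checking this; from Java the policy on a directory can be inspected or set through the HDFS client API. A sketch under the assumption that fs.defaultFS points at the test NameNode; the path is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcPolicyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();        // fs.defaultFS -> the test NameNode
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;   // assumes HDFS
          Path dir = new Path("/user/jenkins/test-data");           // illustrative path
          // Report which EC policy, if any, applies to the directory.
          System.out.println("policy = " + dfs.getErasureCodingPolicy(dir));
          // Applying RS-3-2-1024k only makes sense with >= 5 datanodes available.
          dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
        }
      }
    }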
2024-12-07T06:51:51,025 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:51,025 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 4be06957107ef2f83db89117938f7393, disabling compactions & flushes 2024-12-07T06:51:51,025 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:51,025 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:51,025 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. after waiting 0 ms 2024-12-07T06:51:51,025 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:51,025 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:51,025 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4be06957107ef2f83db89117938f7393: Waiting for close lock at 1733554311025Disabling compacts and flushes for region at 1733554311025Disabling writes for close at 1733554311025Writing region close event to WAL at 1733554311025Closed at 1733554311025 2024-12-07T06:51:51,028 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T06:51:51,033 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733554311028"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733554311028"}]},"ts":"1733554311028"} 2024-12-07T06:51:51,038 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
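The CreateTableProcedure steps above (WRITE_FS_LAYOUT, then ADD_TO_META) build the single-family table the test uses: TestHBaseWalOnEC with one column family 'cf' and the DEFAULT store file tracker. For reference, a minimal client-side sketch of the equivalent createTable call follows; it assumes an already-open Connection named 'connection', and the wrapper class and method names are illustrative only, not taken from TestHBaseWalOnEC.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTableSketch {
  // 'connection' is assumed to be an already-open HBase Connection.
  static void createTestTable(Connection connection) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
        // Single family 'cf'; all other attributes left at the defaults shown in the log.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    try (Admin admin = connection.getAdmin()) {
      admin.createTable(desc); // the master responds with a CreateTableProcedure (pid=4 above)
    }
  }
}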
2024-12-07T06:51:51,045 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T06:51:51,048 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733554311045"}]},"ts":"1733554311045"} 2024-12-07T06:51:51,053 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-07T06:51:51,053 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {61c02eafbb40=0} racks are {/default-rack=0} 2024-12-07T06:51:51,055 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T06:51:51,055 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T06:51:51,055 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T06:51:51,055 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T06:51:51,055 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T06:51:51,055 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T06:51:51,055 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T06:51:51,055 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T06:51:51,055 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T06:51:51,055 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T06:51:51,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4be06957107ef2f83db89117938f7393, ASSIGN}] 2024-12-07T06:51:51,058 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4be06957107ef2f83db89117938f7393, ASSIGN 2024-12-07T06:51:51,060 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4be06957107ef2f83db89117938f7393, ASSIGN; state=OFFLINE, location=61c02eafbb40,38361,1733554308698; forceNewPlan=false, retain=false 2024-12-07T06:51:51,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T06:51:51,214 INFO [61c02eafbb40:40219 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
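The balancer lines above show one table, one host and one rack, and the new region being assigned to 61c02eafbb40,38361. Once the region is OPEN, a client can confirm that placement through the RegionLocator API, which is essentially what the AsyncNonMetaRegionLocator entry further down does for row 'row'. A small sketch, again assuming an open Connection named 'connection' and with illustrative class and method names:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationSketch {
  // 'connection' is assumed to be an already-open HBase Connection.
  static void printLocation(Connection connection) throws IOException {
    try (RegionLocator locator =
        connection.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"));
      // Prints something like: TestHBaseWalOnEC,,<ts>.<encoded>. on 61c02eafbb40,38361,...
      System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
    }
  }
}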
2024-12-07T06:51:51,215 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4be06957107ef2f83db89117938f7393, regionState=OPENING, regionLocation=61c02eafbb40,38361,1733554308698 2024-12-07T06:51:51,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4be06957107ef2f83db89117938f7393, ASSIGN because future has completed 2024-12-07T06:51:51,220 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4be06957107ef2f83db89117938f7393, server=61c02eafbb40,38361,1733554308698}] 2024-12-07T06:51:51,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T06:51:51,380 INFO [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:51,380 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4be06957107ef2f83db89117938f7393, NAME => 'TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393.', STARTKEY => '', ENDKEY => ''} 2024-12-07T06:51:51,381 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,381 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:51,381 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,381 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,383 INFO [StoreOpener-4be06957107ef2f83db89117938f7393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,386 INFO [StoreOpener-4be06957107ef2f83db89117938f7393-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4be06957107ef2f83db89117938f7393 columnFamilyName cf 2024-12-07T06:51:51,386 DEBUG [StoreOpener-4be06957107ef2f83db89117938f7393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:51,387 INFO [StoreOpener-4be06957107ef2f83db89117938f7393-1 {}] regionserver.HStore(327): Store=4be06957107ef2f83db89117938f7393/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T06:51:51,387 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,389 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/default/TestHBaseWalOnEC/4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,389 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/default/TestHBaseWalOnEC/4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,390 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,390 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,393 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,399 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/default/TestHBaseWalOnEC/4be06957107ef2f83db89117938f7393/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T06:51:51,400 INFO [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4be06957107ef2f83db89117938f7393; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71406297, jitterRate=0.06403674185276031}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T06:51:51,400 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:51,400 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4be06957107ef2f83db89117938f7393: Running coprocessor pre-open hook at 1733554311381Writing region info on filesystem at 1733554311381Initializing all the Stores at 1733554311383 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554311383Cleaning up temporary data from old regions at 1733554311390 (+7 ms)Running coprocessor post-open hooks at 1733554311400 (+10 ms)Region opened successfully at 1733554311400 2024-12-07T06:51:51,402 INFO [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393., pid=6, masterSystemTime=1733554311374 2024-12-07T06:51:51,406 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:51,406 INFO [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:51,407 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4be06957107ef2f83db89117938f7393, regionState=OPEN, openSeqNum=2, regionLocation=61c02eafbb40,38361,1733554308698 2024-12-07T06:51:51,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4be06957107ef2f83db89117938f7393, server=61c02eafbb40,38361,1733554308698 because future has completed 2024-12-07T06:51:51,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T06:51:51,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4be06957107ef2f83db89117938f7393, server=61c02eafbb40,38361,1733554308698 in 194 msec 2024-12-07T06:51:51,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T06:51:51,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4be06957107ef2f83db89117938f7393, ASSIGN in 362 msec 2024-12-07T06:51:51,423 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T06:51:51,424 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733554311423"}]},"ts":"1733554311423"} 2024-12-07T06:51:51,426 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-07T06:51:51,428 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T06:51:51,430 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 480 msec 2024-12-07T06:51:51,590 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T06:51:51,591 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T06:51:51,591 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-07T06:51:51,592 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T06:51:51,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-07T06:51:51,597 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T06:51:51,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-07T06:51:51,605 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393., hostname=61c02eafbb40,38361,1733554308698, seqNum=2] 2024-12-07T06:51:51,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-07T06:51:51,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-07T06:51:51,622 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-07T06:51:51,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T06:51:51,623 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T06:51:51,625 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T06:51:51,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T06:51:51,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38361 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-07T06:51:51,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 
2024-12-07T06:51:51,790 INFO [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4be06957107ef2f83db89117938f7393 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-07T06:51:51,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/default/TestHBaseWalOnEC/4be06957107ef2f83db89117938f7393/.tmp/cf/7dd209e2e8494dfb945810ba5154adc4 is 36, key is row/cf:cq/1733554311607/Put/seqid=0 2024-12-07T06:51:51,852 WARN [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:51,852 WARN [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:51,856 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1277850610_22 at /127.0.0.1:35166 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:39505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35166 dst: /127.0.0.1:39505 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T06:51:51,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-07T06:51:51,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T06:51:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T06:51:52,262 WARN [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T06:51:52,262 INFO [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/default/TestHBaseWalOnEC/4be06957107ef2f83db89117938f7393/.tmp/cf/7dd209e2e8494dfb945810ba5154adc4 2024-12-07T06:51:52,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/default/TestHBaseWalOnEC/4be06957107ef2f83db89117938f7393/.tmp/cf/7dd209e2e8494dfb945810ba5154adc4 as hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/default/TestHBaseWalOnEC/4be06957107ef2f83db89117938f7393/cf/7dd209e2e8494dfb945810ba5154adc4 2024-12-07T06:51:52,314 INFO [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/default/TestHBaseWalOnEC/4be06957107ef2f83db89117938f7393/cf/7dd209e2e8494dfb945810ba5154adc4, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T06:51:52,321 INFO [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 4be06957107ef2f83db89117938f7393 in 530ms, sequenceid=5, compaction requested=false 2024-12-07T06:51:52,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-07T06:51:52,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4be06957107ef2f83db89117938f7393: 2024-12-07T06:51:52,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 
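The flush above was requested explicitly after the test wrote a single cell: the HFileWriterImpl line shows the key 'row/cf:cq', the memstore holds only 32 B, and the resulting store file is about 4.7 K. A minimal sketch of that put-then-flush sequence from the client side follows; it assumes an open Connection named 'connection', and the cell value is a placeholder since the log does not show it. The actual code in TestHBaseWalOnEC may differ.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  // 'connection' is assumed to be an already-open HBase Connection.
  static void putAndFlush(Connection connection) throws IOException {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = connection.getTable(name);
         Admin admin = connection.getAdmin()) {
      // Row 'row', family 'cf', qualifier 'cq' match the key in the flush log;
      // the value is a placeholder, the log does not show it.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      admin.flush(name); // triggers the FlushTableProcedure/FlushRegionProcedure (pid=7/8 above)
    }
  }
}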
2024-12-07T06:51:52,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-07T06:51:52,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-07T06:51:52,335 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T06:51:52,335 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 705 msec 2024-12-07T06:51:52,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 720 msec 2024-12-07T06:51:52,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-07T06:51:52,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-07T06:51:52,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-07T06:51:52,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-07T06:51:52,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-07T06:51:52,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-07T06:51:52,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-07T06:51:52,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-07T06:51:52,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-07T06:51:52,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-07T06:51:52,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T06:51:52,761 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T06:51:52,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T06:51:52,774 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T06:51:52,774 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:52,778 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
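The call stack above records the teardown path: TestHBaseWalOnEC.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the HBase and DFS mini-clusters, producing the shutdown messages that follow. A minimal JUnit 4 sketch of that lifecycle is shown below; the field name TEST_UTIL, the startMiniCluster(3) call and the @BeforeClass/@AfterClass wiring are assumptions based on the stack trace, not copied from the test.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycleSketch {
  // Field name TEST_UTIL is an assumption; the stack trace only confirms the class and methods used.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Three region servers, matching RS:0/RS:1/RS:2 in the shutdown log that follows.
    TEST_UTIL.startMiniCluster(3);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Closes the shared cluster connection, then stops HBase and the mini DFS cluster,
    // producing the "Shutting down minicluster" / "STOPPING region server" lines above and below.
    TEST_UTIL.shutdownMiniCluster();
  }
}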
2024-12-07T06:51:52,779 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:52,779 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T06:51:52,779 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T06:51:52,779 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=764753395, stopped=false 2024-12-07T06:51:52,780 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=61c02eafbb40,40219,1733554307838 2024-12-07T06:51:52,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:52,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:52,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:52,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:52,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:52,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:52,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:52,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:52,782 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T06:51:52,783 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T06:51:52,783 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:52,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:52,783 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:52,783 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:52,783 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:52,784 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:52,784 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '61c02eafbb40,39579,1733554308555' ***** 2024-12-07T06:51:52,784 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T06:51:52,784 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '61c02eafbb40,40491,1733554308661' ***** 2024-12-07T06:51:52,784 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T06:51:52,784 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '61c02eafbb40,38361,1733554308698' ***** 2024-12-07T06:51:52,784 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T06:51:52,784 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T06:51:52,784 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T06:51:52,784 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T06:51:52,785 INFO [RS:2;61c02eafbb40:38361 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T06:51:52,785 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T06:51:52,785 INFO [RS:0;61c02eafbb40:39579 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T06:51:52,785 INFO [RS:2;61c02eafbb40:38361 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T06:51:52,785 INFO [RS:1;61c02eafbb40:40491 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T06:51:52,785 INFO [RS:0;61c02eafbb40:39579 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T06:51:52,785 INFO [RS:1;61c02eafbb40:40491 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-07T06:51:52,785 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(959): stopping server 61c02eafbb40,39579,1733554308555 2024-12-07T06:51:52,785 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(959): stopping server 61c02eafbb40,40491,1733554308661 2024-12-07T06:51:52,785 INFO [RS:1;61c02eafbb40:40491 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T06:51:52,785 INFO [RS:0;61c02eafbb40:39579 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T06:51:52,785 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T06:51:52,785 INFO [RS:1;61c02eafbb40:40491 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;61c02eafbb40:40491. 2024-12-07T06:51:52,785 INFO [RS:0;61c02eafbb40:39579 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;61c02eafbb40:39579. 2024-12-07T06:51:52,785 DEBUG [RS:1;61c02eafbb40:40491 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:52,785 DEBUG [RS:1;61c02eafbb40:40491 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:52,785 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T06:51:52,786 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(976): stopping server 61c02eafbb40,40491,1733554308661; all regions closed. 
2024-12-07T06:51:52,786 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(3091): Received CLOSE for 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:52,786 DEBUG [RS:0;61c02eafbb40:39579 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:52,786 DEBUG [RS:0;61c02eafbb40:39579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:52,786 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(976): stopping server 61c02eafbb40,39579,1733554308555; all regions closed. 2024-12-07T06:51:52,786 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(959): stopping server 61c02eafbb40,38361,1733554308698 2024-12-07T06:51:52,786 INFO [RS:2;61c02eafbb40:38361 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T06:51:52,786 INFO [RS:2;61c02eafbb40:38361 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;61c02eafbb40:38361. 
2024-12-07T06:51:52,786 DEBUG [RS:2;61c02eafbb40:38361 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:52,787 DEBUG [RS:2;61c02eafbb40:38361 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:52,787 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4be06957107ef2f83db89117938f7393, disabling compactions & flushes 2024-12-07T06:51:52,787 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T06:51:52,787 INFO [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:52,787 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T06:51:52,787 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:52,787 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T06:51:52,787 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. after waiting 0 ms 2024-12-07T06:51:52,787 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T06:51:52,787 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 
2024-12-07T06:51:52,788 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T06:51:52,788 DEBUG [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4be06957107ef2f83db89117938f7393=TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393.} 2024-12-07T06:51:52,788 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T06:51:52,789 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T06:51:52,789 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T06:51:52,789 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T06:51:52,789 DEBUG [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4be06957107ef2f83db89117938f7393 2024-12-07T06:51:52,789 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T06:51:52,789 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-07T06:51:52,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_1073741826_1016 (size=93) 2024-12-07T06:51:52,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_1073741826_1016 (size=93) 2024-12-07T06:51:52,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741827_1017 (size=93) 2024-12-07T06:51:52,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_1073741827_1017 (size=93) 2024-12-07T06:51:52,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_1073741827_1017 (size=93) 2024-12-07T06:51:52,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741826_1016 (size=93) 2024-12-07T06:51:52,800 DEBUG [RS:0;61c02eafbb40:39579 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/oldWALs 2024-12-07T06:51:52,800 INFO [RS:0;61c02eafbb40:39579 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 61c02eafbb40%2C39579%2C1733554308555:(num 1733554310017) 2024-12-07T06:51:52,800 DEBUG [RS:0;61c02eafbb40:39579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:52,800 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:52,800 DEBUG [RS:1;61c02eafbb40:40491 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/oldWALs 2024-12-07T06:51:52,800 INFO [RS:1;61c02eafbb40:40491 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 
61c02eafbb40%2C40491%2C1733554308661:(num 1733554310017) 2024-12-07T06:51:52,800 DEBUG [RS:1;61c02eafbb40:40491 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:52,800 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:52,800 INFO [RS:0;61c02eafbb40:39579 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T06:51:52,800 INFO [RS:1;61c02eafbb40:40491 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T06:51:52,801 INFO [RS:0;61c02eafbb40:39579 {}] hbase.ChoreService(370): Chore service for: regionserver/61c02eafbb40:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T06:51:52,801 INFO [RS:1;61c02eafbb40:40491 {}] hbase.ChoreService(370): Chore service for: regionserver/61c02eafbb40:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T06:51:52,801 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T06:51:52,801 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T06:51:52,801 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T06:51:52,801 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T06:51:52,801 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T06:51:52,801 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T06:51:52,801 INFO [RS:1;61c02eafbb40:40491 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T06:51:52,801 INFO [RS:0;61c02eafbb40:39579 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T06:51:52,801 INFO [regionserver/61c02eafbb40:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T06:51:52,801 INFO [RS:0;61c02eafbb40:39579 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39579 2024-12-07T06:51:52,801 INFO [RS:1;61c02eafbb40:40491 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40491 2024-12-07T06:51:52,803 INFO [regionserver/61c02eafbb40:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T06:51:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T06:51:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/61c02eafbb40,40491,1733554308661 2024-12-07T06:51:52,805 INFO [RS:1;61c02eafbb40:40491 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T06:51:52,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/61c02eafbb40,39579,1733554308555 2024-12-07T06:51:52,806 INFO [RS:0;61c02eafbb40:39579 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T06:51:52,808 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [61c02eafbb40,39579,1733554308555] 2024-12-07T06:51:52,812 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/61c02eafbb40,39579,1733554308555 already deleted, retry=false 2024-12-07T06:51:52,812 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 61c02eafbb40,39579,1733554308555 expired; onlineServers=2 2024-12-07T06:51:52,812 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [61c02eafbb40,40491,1733554308661] 2024-12-07T06:51:52,813 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/61c02eafbb40,40491,1733554308661 already deleted, retry=false 2024-12-07T06:51:52,813 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 61c02eafbb40,40491,1733554308661 expired; onlineServers=1 2024-12-07T06:51:52,818 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/default/TestHBaseWalOnEC/4be06957107ef2f83db89117938f7393/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T06:51:52,821 INFO [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 2024-12-07T06:51:52,821 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4be06957107ef2f83db89117938f7393: Waiting for close lock at 1733554312786Running coprocessor pre-close hooks at 1733554312787 (+1 ms)Disabling compacts and flushes for region at 1733554312787Disabling writes for close at 1733554312787Writing region close event to WAL at 1733554312789 (+2 ms)Running coprocessor post-close hooks at 1733554312819 (+30 ms)Closed at 1733554312821 (+2 ms) 2024-12-07T06:51:52,821 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393. 
2024-12-07T06:51:52,838 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/.tmp/info/56c4f41ae3a74eb3ba8196f68cd91f09 is 153, key is TestHBaseWalOnEC,,1733554310939.4be06957107ef2f83db89117938f7393./info:regioninfo/1733554311407/Put/seqid=0 2024-12-07T06:51:52,842 WARN [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:52,842 WARN [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:52,846 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1277850610_22 at /127.0.0.1:47234 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:41381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47234 dst: /127.0.0.1:41381 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:52,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-07T06:51:52,851 INFO [regionserver/61c02eafbb40:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T06:51:52,851 INFO [regionserver/61c02eafbb40:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T06:51:52,852 WARN [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
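Editorial note on the recurring warnings above: the log itself points at the cause check ("You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'"). RS-3-2-1024k stripes each block group into 3 data units plus 2 parity units, so placing every unit on a distinct datanode needs at least 5 datanodes, while this mini cluster runs only 3; parity units at indexes 3 and 4 therefore cannot be allocated. The snippet below is a minimal, hedged sketch of how one might confirm that mismatch programmatically; it is not part of the test, and the NameNode address and the path being inspected are taken from the log but otherwise assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class EcPlacementCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumption: NameNode address as printed in this log (hdfs://localhost:40093).
        conf.set("fs.defaultFS", "hdfs://localhost:40093");
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
            // Assumption: the EC policy is set somewhere under the test data directory.
            ErasureCodingPolicy policy =
                dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
            int liveDatanodes = dfs.getDataNodeStats(DatanodeReportType.LIVE).length;
            if (policy == null) {
                System.out.println("No erasure coding policy set on that path.");
            } else {
                // RS-3-2 => 3 data units + 2 parity units => needs >= 5 datanodes.
                int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
                System.out.printf("policy=%s needs %d datanodes, cluster has %d live%n",
                    policy.getName(), needed, liveDatanodes);
            }
        }
    }
}
```

If the reported "needed" count exceeds the live datanode count, the "Cannot allocate parity block" warnings and the follow-up "Block group <1> failed to write 2 blocks" message seen throughout this shutdown are the expected symptom.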
2024-12-07T06:51:52,852 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/.tmp/info/56c4f41ae3a74eb3ba8196f68cd91f09 2024-12-07T06:51:52,855 INFO [regionserver/61c02eafbb40:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:52,860 INFO [regionserver/61c02eafbb40:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:52,861 INFO [regionserver/61c02eafbb40:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:52,882 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/.tmp/ns/578b16100cce49408a2e3ddac55d9954 is 43, key is default/ns:d/1733554310646/Put/seqid=0 2024-12-07T06:51:52,885 WARN [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:52,885 WARN [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:52,889 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1277850610_22 at /127.0.0.1:47266 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:41381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47266 dst: /127.0.0.1:41381 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T06:51:52,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-07T06:51:52,898 WARN [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T06:51:52,899 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/.tmp/ns/578b16100cce49408a2e3ddac55d9954 2024-12-07T06:51:52,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:52,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40491-0x1018bb16a660002, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:52,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:52,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39579-0x1018bb16a660001, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:52,909 INFO [RS:0;61c02eafbb40:39579 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T06:51:52,909 INFO [RS:1;61c02eafbb40:40491 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T06:51:52,909 INFO [RS:1;61c02eafbb40:40491 {}] regionserver.HRegionServer(1031): Exiting; stopping=61c02eafbb40,40491,1733554308661; zookeeper connection closed. 2024-12-07T06:51:52,909 INFO [RS:0;61c02eafbb40:39579 {}] regionserver.HRegionServer(1031): Exiting; stopping=61c02eafbb40,39579,1733554308555; zookeeper connection closed. 2024-12-07T06:51:52,909 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5f29ddc5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5f29ddc5 2024-12-07T06:51:52,909 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4637066b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4637066b 2024-12-07T06:51:52,926 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/.tmp/table/b30eba87083c4e2e961c8dbfeed148a4 is 52, key is TestHBaseWalOnEC/table:state/1733554311423/Put/seqid=0 2024-12-07T06:51:52,929 WARN [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T06:51:52,929 WARN [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:52,934 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1277850610_22 at /127.0.0.1:35234 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:39505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35234 dst: /127.0.0.1:39505 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:52,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-07T06:51:52,941 WARN [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T06:51:52,941 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/.tmp/table/b30eba87083c4e2e961c8dbfeed148a4 2024-12-07T06:51:52,953 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/.tmp/info/56c4f41ae3a74eb3ba8196f68cd91f09 as hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/info/56c4f41ae3a74eb3ba8196f68cd91f09 2024-12-07T06:51:52,962 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/info/56c4f41ae3a74eb3ba8196f68cd91f09, entries=10, sequenceid=11, filesize=6.5 K 2024-12-07T06:51:52,964 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/.tmp/ns/578b16100cce49408a2e3ddac55d9954 as hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/ns/578b16100cce49408a2e3ddac55d9954 2024-12-07T06:51:52,972 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/ns/578b16100cce49408a2e3ddac55d9954, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T06:51:52,974 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/.tmp/table/b30eba87083c4e2e961c8dbfeed148a4 as hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/table/b30eba87083c4e2e961c8dbfeed148a4 2024-12-07T06:51:52,983 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/table/b30eba87083c4e2e961c8dbfeed148a4, entries=2, sequenceid=11, filesize=5.1 K 2024-12-07T06:51:52,986 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 197ms, sequenceid=11, compaction requested=false 2024-12-07T06:51:52,986 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T06:51:52,989 DEBUG [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T06:51:53,013 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T06:51:53,014 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T06:51:53,014 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T06:51:53,014 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733554312788Running coprocessor pre-close hooks at 1733554312788Disabling compacts and flushes for region at 1733554312788Disabling writes for close at 1733554312789 (+1 ms)Obtaining lock to block concurrent updates at 1733554312789Preparing flush snapshotting stores in 1588230740 at 1733554312789Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733554312790 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733554312794 (+4 ms)Flushing 1588230740/info: creating writer at 1733554312794Flushing 1588230740/info: appending metadata at 1733554312835 (+41 ms)Flushing 1588230740/info: closing flushed file at 1733554312835Flushing 1588230740/ns: creating writer at 1733554312864 (+29 ms)Flushing 1588230740/ns: appending metadata at 1733554312881 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733554312882 (+1 ms)Flushing 1588230740/table: creating writer at 1733554312908 (+26 ms)Flushing 1588230740/table: appending metadata at 1733554312926 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733554312926Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7080841e: reopening flushed file at 1733554312951 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ea48c9c: reopening flushed file at 1733554312962 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ac5cb76: reopening flushed file at 1733554312973 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 197ms, sequenceid=11, compaction requested=false at 1733554312986 (+13 ms)Writing region close event to WAL at 1733554312995 (+9 ms)Running coprocessor post-close hooks at 1733554313014 (+19 ms)Closed at 1733554313014 2024-12-07T06:51:53,014 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T06:51:53,189 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(976): stopping server 61c02eafbb40,38361,1733554308698; all regions closed. 
2024-12-07T06:51:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741829_1019 (size=2751) 2024-12-07T06:51:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_1073741829_1019 (size=2751) 2024-12-07T06:51:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_1073741829_1019 (size=2751) 2024-12-07T06:51:53,197 DEBUG [RS:2;61c02eafbb40:38361 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/oldWALs 2024-12-07T06:51:53,197 INFO [RS:2;61c02eafbb40:38361 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 61c02eafbb40%2C38361%2C1733554308698.meta:.meta(num 1733554310495) 2024-12-07T06:51:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_1073741828_1018 (size=1298) 2024-12-07T06:51:53,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741828_1018 (size=1298) 2024-12-07T06:51:53,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_1073741828_1018 (size=1298) 2024-12-07T06:51:53,202 DEBUG [RS:2;61c02eafbb40:38361 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/oldWALs 2024-12-07T06:51:53,202 INFO [RS:2;61c02eafbb40:38361 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 61c02eafbb40%2C38361%2C1733554308698:(num 1733554310017) 2024-12-07T06:51:53,202 DEBUG [RS:2;61c02eafbb40:38361 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:53,203 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:53,203 INFO [RS:2;61c02eafbb40:38361 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T06:51:53,203 INFO [RS:2;61c02eafbb40:38361 {}] hbase.ChoreService(370): Chore service for: regionserver/61c02eafbb40:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T06:51:53,203 INFO [RS:2;61c02eafbb40:38361 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T06:51:53,203 INFO [regionserver/61c02eafbb40:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T06:51:53,203 INFO [RS:2;61c02eafbb40:38361 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38361 2024-12-07T06:51:53,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/61c02eafbb40,38361,1733554308698 2024-12-07T06:51:53,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T06:51:53,206 INFO [RS:2;61c02eafbb40:38361 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T06:51:53,208 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [61c02eafbb40,38361,1733554308698] 2024-12-07T06:51:53,211 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/61c02eafbb40,38361,1733554308698 already deleted, retry=false 2024-12-07T06:51:53,211 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 61c02eafbb40,38361,1733554308698 expired; onlineServers=0 2024-12-07T06:51:53,211 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '61c02eafbb40,40219,1733554307838' ***** 2024-12-07T06:51:53,211 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T06:51:53,211 INFO [M:0;61c02eafbb40:40219 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T06:51:53,211 INFO [M:0;61c02eafbb40:40219 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T06:51:53,212 DEBUG [M:0;61c02eafbb40:40219 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T06:51:53,212 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T06:51:53,212 DEBUG [M:0;61c02eafbb40:40219 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T06:51:53,212 DEBUG [master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.small.0-1733554309709 {}] cleaner.HFileCleaner(306): Exit Thread[master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.small.0-1733554309709,5,FailOnTimeoutGroup] 2024-12-07T06:51:53,212 DEBUG [master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.large.0-1733554309698 {}] cleaner.HFileCleaner(306): Exit Thread[master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.large.0-1733554309698,5,FailOnTimeoutGroup] 2024-12-07T06:51:53,212 INFO [M:0;61c02eafbb40:40219 {}] hbase.ChoreService(370): Chore service for: master/61c02eafbb40:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T06:51:53,212 INFO [M:0;61c02eafbb40:40219 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T06:51:53,212 DEBUG [M:0;61c02eafbb40:40219 {}] master.HMaster(1795): Stopping service threads 2024-12-07T06:51:53,212 INFO [M:0;61c02eafbb40:40219 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T06:51:53,213 INFO [M:0;61c02eafbb40:40219 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T06:51:53,213 INFO [M:0;61c02eafbb40:40219 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T06:51:53,214 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T06:51:53,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T06:51:53,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:53,214 DEBUG [M:0;61c02eafbb40:40219 {}] zookeeper.ZKUtil(347): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T06:51:53,214 WARN [M:0;61c02eafbb40:40219 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T06:51:53,215 INFO [M:0;61c02eafbb40:40219 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/.lastflushedseqids 2024-12-07T06:51:53,225 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:53,225 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T06:51:53,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:45348 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:36227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45348 dst: /127.0.0.1:36227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:53,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-07T06:51:53,231 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T06:51:53,232 INFO [M:0;61c02eafbb40:40219 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T06:51:53,232 INFO [M:0;61c02eafbb40:40219 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T06:51:53,232 DEBUG [M:0;61c02eafbb40:40219 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T06:51:53,232 INFO [M:0;61c02eafbb40:40219 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:53,232 DEBUG [M:0;61c02eafbb40:40219 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:53,232 DEBUG [M:0;61c02eafbb40:40219 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T06:51:53,232 DEBUG [M:0;61c02eafbb40:40219 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T06:51:53,232 INFO [M:0;61c02eafbb40:40219 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.81 KB heapSize=34.10 KB 2024-12-07T06:51:53,252 DEBUG [M:0;61c02eafbb40:40219 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/09091edfbe114e1babb22c628267f5e3 is 82, key is hbase:meta,,1/info:regioninfo/1733554310579/Put/seqid=0 2024-12-07T06:51:53,254 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:53,254 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:53,257 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:45362 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:36227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45362 dst: /127.0.0.1:36227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:53,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-07T06:51:53,261 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T06:51:53,261 INFO [M:0;61c02eafbb40:40219 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/09091edfbe114e1babb22c628267f5e3 2024-12-07T06:51:53,287 DEBUG [M:0;61c02eafbb40:40219 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a88f5d713c074b2095c9dd9c511fae66 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733554311429/Put/seqid=0 2024-12-07T06:51:53,289 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:53,289 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:53,291 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:47280 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:41381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47280 dst: /127.0.0.1:41381 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:53,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775552_1037 (size=6437) 2024-12-07T06:51:53,296 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T06:51:53,296 INFO [M:0;61c02eafbb40:40219 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.12 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a88f5d713c074b2095c9dd9c511fae66 2024-12-07T06:51:53,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:53,308 INFO [RS:2;61c02eafbb40:38361 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T06:51:53,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38361-0x1018bb16a660003, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:53,308 INFO [RS:2;61c02eafbb40:38361 {}] regionserver.HRegionServer(1031): Exiting; stopping=61c02eafbb40,38361,1733554308698; zookeeper connection closed. 2024-12-07T06:51:53,308 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@57ec5b5b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@57ec5b5b 2024-12-07T06:51:53,309 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T06:51:53,320 DEBUG [M:0;61c02eafbb40:40219 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bbef07e18124406c9a5af83eff289829 is 69, key is 61c02eafbb40,38361,1733554308698/rs:state/1733554309757/Put/seqid=0 2024-12-07T06:51:53,322 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:53,322 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T06:51:53,325 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1570030418_22 at /127.0.0.1:47292 [Receiving block BP-264701312-172.17.0.2-1733554304606:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:41381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47292 dst: /127.0.0.1:41381 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T06:51:53,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-07T06:51:53,329 WARN [M:0;61c02eafbb40:40219 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T06:51:53,329 INFO [M:0;61c02eafbb40:40219 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bbef07e18124406c9a5af83eff289829 2024-12-07T06:51:53,339 DEBUG [M:0;61c02eafbb40:40219 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/09091edfbe114e1babb22c628267f5e3 as hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/09091edfbe114e1babb22c628267f5e3 2024-12-07T06:51:53,347 INFO [M:0;61c02eafbb40:40219 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/09091edfbe114e1babb22c628267f5e3, entries=8, sequenceid=72, filesize=5.5 K 2024-12-07T06:51:53,349 DEBUG [M:0;61c02eafbb40:40219 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a88f5d713c074b2095c9dd9c511fae66 as hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a88f5d713c074b2095c9dd9c511fae66 2024-12-07T06:51:53,357 INFO [M:0;61c02eafbb40:40219 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a88f5d713c074b2095c9dd9c511fae66, entries=8, sequenceid=72, filesize=6.3 K 2024-12-07T06:51:53,358 DEBUG [M:0;61c02eafbb40:40219 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bbef07e18124406c9a5af83eff289829 as hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bbef07e18124406c9a5af83eff289829 2024-12-07T06:51:53,365 INFO [M:0;61c02eafbb40:40219 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bbef07e18124406c9a5af83eff289829, entries=3, sequenceid=72, filesize=5.2 K 2024-12-07T06:51:53,367 INFO [M:0;61c02eafbb40:40219 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=72, compaction requested=false 2024-12-07T06:51:53,368 INFO [M:0;61c02eafbb40:40219 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:53,368 DEBUG [M:0;61c02eafbb40:40219 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733554313232Disabling compacts and flushes for region at 1733554313232Disabling writes for close at 1733554313232Obtaining lock to block concurrent updates at 1733554313232Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733554313232Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27450, getHeapSize=34856, getOffHeapSize=0, getCellsCount=85 at 1733554313233 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733554313234 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733554313234Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733554313251 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733554313251Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733554313269 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733554313286 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733554313286Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733554313304 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733554313320 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733554313320Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@646dca1b: reopening flushed file at 1733554313337 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@205cab81: reopening flushed file at 1733554313348 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65a9b8c0: reopening flushed file at 1733554313357 (+9 ms)Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=72, compaction requested=false at 1733554313367 (+10 ms)Writing region close event to WAL at 1733554313368 (+1 ms)Closed at 1733554313368 2024-12-07T06:51:53,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39505 is added to blk_1073741825_1011 (size=32653) 2024-12-07T06:51:53,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41381 is added to blk_1073741825_1011 (size=32653) 2024-12-07T06:51:53,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741825_1011 (size=32653) 2024-12-07T06:51:53,373 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T06:51:53,373 INFO [M:0;61c02eafbb40:40219 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T06:51:53,373 INFO [M:0;61c02eafbb40:40219 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40219 2024-12-07T06:51:53,373 INFO [M:0;61c02eafbb40:40219 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T06:51:53,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:53,475 INFO [M:0;61c02eafbb40:40219 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T06:51:53,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40219-0x1018bb16a660000, quorum=127.0.0.1:56718, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:53,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3114ae69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:53,482 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T06:51:53,482 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T06:51:53,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T06:51:53,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir/,STOPPED} 2024-12-07T06:51:53,485 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T06:51:53,485 WARN [BP-264701312-172.17.0.2-1733554304606 heartbeating to localhost/127.0.0.1:40093 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T06:51:53,485 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T06:51:53,485 WARN [BP-264701312-172.17.0.2-1733554304606 heartbeating to localhost/127.0.0.1:40093 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-264701312-172.17.0.2-1733554304606 (Datanode Uuid 4e00dcf7-9789-4453-bcb8-9aa883f42e96) service to localhost/127.0.0.1:40093 2024-12-07T06:51:53,486 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data5/current/BP-264701312-172.17.0.2-1733554304606 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T06:51:53,486 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data6/current/BP-264701312-172.17.0.2-1733554304606 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T06:51:53,487 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T06:51:53,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@353955e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:53,489 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T06:51:53,490 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T06:51:53,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T06:51:53,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir/,STOPPED} 2024-12-07T06:51:53,492 WARN [BP-264701312-172.17.0.2-1733554304606 heartbeating to localhost/127.0.0.1:40093 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T06:51:53,492 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T06:51:53,492 WARN [BP-264701312-172.17.0.2-1733554304606 heartbeating to localhost/127.0.0.1:40093 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-264701312-172.17.0.2-1733554304606 (Datanode Uuid 7dd2d5a4-4761-49ce-bfb5-d88522fbc13d) service to localhost/127.0.0.1:40093 2024-12-07T06:51:53,492 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T06:51:53,492 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data3/current/BP-264701312-172.17.0.2-1733554304606 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T06:51:53,493 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data4/current/BP-264701312-172.17.0.2-1733554304606 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T06:51:53,493 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T06:51:53,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b97a472{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:53,495 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T06:51:53,495 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T06:51:53,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T06:51:53,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir/,STOPPED} 2024-12-07T06:51:53,497 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T06:51:53,497 WARN [BP-264701312-172.17.0.2-1733554304606 heartbeating to localhost/127.0.0.1:40093 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T06:51:53,497 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T06:51:53,497 WARN [BP-264701312-172.17.0.2-1733554304606 heartbeating to localhost/127.0.0.1:40093 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-264701312-172.17.0.2-1733554304606 (Datanode Uuid 1ee0cd3f-06af-4b67-aa59-7d3751ac7ecb) service to localhost/127.0.0.1:40093 2024-12-07T06:51:53,497 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data1/current/BP-264701312-172.17.0.2-1733554304606 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T06:51:53,498 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/cluster_0e5f2880-c435-a0d9-3380-b784b11e7d6f/data/data2/current/BP-264701312-172.17.0.2-1733554304606 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T06:51:53,498 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T06:51:53,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T06:51:53,507 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T06:51:53,507 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T06:51:53,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T06:51:53,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir/,STOPPED} 2024-12-07T06:51:53,516 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T06:51:53,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T06:51:53,551 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=87 (was 160), OpenFileDescriptor=453 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=192 (was 209), ProcessCount=11 (was 11), AvailableMemoryMB=6589 (was 6911) 2024-12-07T06:51:53,558 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=87, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=192, ProcessCount=11, AvailableMemoryMB=6589 2024-12-07T06:51:53,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T06:51:53,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.log.dir so I do NOT create it in target/test-data/37357f55-a14c-0088-e8da-776df44b915b 2024-12-07T06:51:53,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8a776e4a-4c3c-d797-073f-8f8e6a0fa752/hadoop.tmp.dir so I do NOT create it in target/test-data/37357f55-a14c-0088-e8da-776df44b915b 2024-12-07T06:51:53,558 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c, deleteOnExit=true 2024-12-07T06:51:53,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T06:51:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/test.cache.data in system properties and HBase conf 2024-12-07T06:51:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T06:51:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.log.dir in system properties and HBase conf 2024-12-07T06:51:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T06:51:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T06:51:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T06:51:53,559 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T06:51:53,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/nfs.dump.dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/java.io.tmpdir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T06:51:53,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T06:51:53,664 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:53,669 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T06:51:53,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T06:51:53,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T06:51:53,670 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T06:51:53,671 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:53,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1921d73d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.log.dir/,AVAILABLE} 2024-12-07T06:51:53,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@117b7671{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T06:51:53,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d637fa1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/java.io.tmpdir/jetty-localhost-41357-hadoop-hdfs-3_4_1-tests_jar-_-any-13146613808981636682/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T06:51:53,789 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6684a7bb{HTTP/1.1, (http/1.1)}{localhost:41357} 2024-12-07T06:51:53,789 INFO [Time-limited test {}] server.Server(415): Started @11175ms 2024-12-07T06:51:53,874 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:53,878 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T06:51:53,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T06:51:53,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T06:51:53,879 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T06:51:53,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c867c3b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.log.dir/,AVAILABLE} 2024-12-07T06:51:53,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ef18cbe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T06:51:54,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4072566{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/java.io.tmpdir/jetty-localhost-44601-hadoop-hdfs-3_4_1-tests_jar-_-any-17890753798674750638/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:54,018 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@6002231{HTTP/1.1, (http/1.1)}{localhost:44601} 2024-12-07T06:51:54,018 INFO [Time-limited test {}] server.Server(415): Started @11405ms 2024-12-07T06:51:54,019 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T06:51:54,060 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:54,063 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T06:51:54,064 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T06:51:54,064 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T06:51:54,064 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T06:51:54,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a71642{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.log.dir/,AVAILABLE} 2024-12-07T06:51:54,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6de03e39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T06:51:54,133 WARN [Thread-528 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data2/current/BP-1161046142-172.17.0.2-1733554313592/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:54,133 WARN [Thread-527 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data1/current/BP-1161046142-172.17.0.2-1733554313592/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:54,156 WARN [Thread-506 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T06:51:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69d380a26e6ce592 with lease ID 0x3f13c4c04a138b26: Processing first storage report for DS-da56b2cd-7eb1-4508-b8bb-db9c2e82fb4d from datanode DatanodeRegistration(127.0.0.1:45207, datanodeUuid=8a5a4007-6645-43f4-9aa3-3aa0996c0309, infoPort=38701, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592) 2024-12-07T06:51:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69d380a26e6ce592 with lease ID 0x3f13c4c04a138b26: from storage DS-da56b2cd-7eb1-4508-b8bb-db9c2e82fb4d node DatanodeRegistration(127.0.0.1:45207, datanodeUuid=8a5a4007-6645-43f4-9aa3-3aa0996c0309, infoPort=38701, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T06:51:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69d380a26e6ce592 with lease ID 0x3f13c4c04a138b26: Processing first storage report for DS-9f84c3d2-d6f0-4eb0-8134-0d231a33d3b3 from datanode DatanodeRegistration(127.0.0.1:45207, datanodeUuid=8a5a4007-6645-43f4-9aa3-3aa0996c0309, infoPort=38701, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592) 2024-12-07T06:51:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69d380a26e6ce592 with lease ID 0x3f13c4c04a138b26: from storage DS-9f84c3d2-d6f0-4eb0-8134-0d231a33d3b3 node DatanodeRegistration(127.0.0.1:45207, datanodeUuid=8a5a4007-6645-43f4-9aa3-3aa0996c0309, infoPort=38701, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T06:51:54,195 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f2c6971{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/java.io.tmpdir/jetty-localhost-45209-hadoop-hdfs-3_4_1-tests_jar-_-any-782528936043065451/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:54,195 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78adaad1{HTTP/1.1, (http/1.1)}{localhost:45209} 2024-12-07T06:51:54,195 INFO [Time-limited test {}] server.Server(415): Started @11582ms 2024-12-07T06:51:54,197 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T06:51:54,228 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T06:51:54,231 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T06:51:54,232 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T06:51:54,232 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T06:51:54,232 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T06:51:54,233 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3beb2b8e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.log.dir/,AVAILABLE} 2024-12-07T06:51:54,233 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@611fd8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T06:51:54,292 WARN [Thread-562 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data3/current/BP-1161046142-172.17.0.2-1733554313592/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:54,292 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data4/current/BP-1161046142-172.17.0.2-1733554313592/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:54,309 WARN [Thread-542 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T06:51:54,312 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x106fda40bfc680d3 with lease ID 0x3f13c4c04a138b27: Processing first storage report for DS-436a68f6-33f5-4a9f-b72f-d8b01a00dffc from datanode DatanodeRegistration(127.0.0.1:42367, datanodeUuid=19434a39-e1a3-454c-bfe2-49a0599ef657, infoPort=38599, infoSecurePort=0, ipcPort=39675, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592) 2024-12-07T06:51:54,312 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x106fda40bfc680d3 with lease ID 0x3f13c4c04a138b27: from storage DS-436a68f6-33f5-4a9f-b72f-d8b01a00dffc node DatanodeRegistration(127.0.0.1:42367, datanodeUuid=19434a39-e1a3-454c-bfe2-49a0599ef657, infoPort=38599, infoSecurePort=0, ipcPort=39675, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T06:51:54,313 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x106fda40bfc680d3 with lease ID 0x3f13c4c04a138b27: Processing first storage report for DS-6f4ab651-301c-4d4b-9a4c-49f176d97c2f from datanode DatanodeRegistration(127.0.0.1:42367, datanodeUuid=19434a39-e1a3-454c-bfe2-49a0599ef657, infoPort=38599, infoSecurePort=0, ipcPort=39675, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592) 2024-12-07T06:51:54,313 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x106fda40bfc680d3 with lease ID 0x3f13c4c04a138b27: from storage DS-6f4ab651-301c-4d4b-9a4c-49f176d97c2f node DatanodeRegistration(127.0.0.1:42367, datanodeUuid=19434a39-e1a3-454c-bfe2-49a0599ef657, infoPort=38599, infoSecurePort=0, ipcPort=39675, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T06:51:54,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@395e66b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/java.io.tmpdir/jetty-localhost-42371-hadoop-hdfs-3_4_1-tests_jar-_-any-1116941264560525332/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:54,355 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@765c7210{HTTP/1.1, (http/1.1)}{localhost:42371} 2024-12-07T06:51:54,355 INFO [Time-limited test {}] server.Server(415): Started @11741ms 2024-12-07T06:51:54,357 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
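At this point the second test case (testReadWrite[1]) has asked HBaseTestingUtil for a fresh minicluster with 1 master, 3 region servers, 3 datanodes and 1 ZooKeeper server, and the embedded Jetty HTTP servers for the namenode and all three datanodes are up. A hedged sketch of how a test typically drives this, assuming the HBaseTestingUtil/StartMiniClusterOption API named in the log (builder method names may differ between HBase versions):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    // Sketch only; assumes these builder methods exist in the HBase version under test.
    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            StartMiniClusterOption option = StartMiniClusterOption.builder()
                .numMasters(1)          // matches StartMiniClusterOption{numMasters=1, ...} above
                .numRegionServers(3)
                .numDataNodes(3)
                .numZkServers(1)
                .build();
            util.startMiniCluster(option);   // starts DFS, ZK, the master and region servers
            try {
                // ... run test logic against util.getConnection() ...
            } finally {
                util.shutdownMiniCluster();  // produces the "Minicluster is down" line seen earlier
            }
        }
    }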
2024-12-07T06:51:54,464 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data5/current/BP-1161046142-172.17.0.2-1733554313592/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:54,465 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data6/current/BP-1161046142-172.17.0.2-1733554313592/current, will proceed with Du for space computation calculation, 2024-12-07T06:51:54,483 WARN [Thread-577 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T06:51:54,487 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3d36fa94d196bcb8 with lease ID 0x3f13c4c04a138b28: Processing first storage report for DS-742c1910-dbc3-4477-8545-358f6379a21e from datanode DatanodeRegistration(127.0.0.1:46667, datanodeUuid=2754d510-d5cb-4ef8-8831-1bb4a8a6e0c9, infoPort=41107, infoSecurePort=0, ipcPort=44631, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592) 2024-12-07T06:51:54,487 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3d36fa94d196bcb8 with lease ID 0x3f13c4c04a138b28: from storage DS-742c1910-dbc3-4477-8545-358f6379a21e node DatanodeRegistration(127.0.0.1:46667, datanodeUuid=2754d510-d5cb-4ef8-8831-1bb4a8a6e0c9, infoPort=41107, infoSecurePort=0, ipcPort=44631, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T06:51:54,487 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3d36fa94d196bcb8 with lease ID 0x3f13c4c04a138b28: Processing first storage report for DS-0e71e882-6627-40e9-8cec-be2350e4c415 from datanode DatanodeRegistration(127.0.0.1:46667, datanodeUuid=2754d510-d5cb-4ef8-8831-1bb4a8a6e0c9, infoPort=41107, infoSecurePort=0, ipcPort=44631, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592) 2024-12-07T06:51:54,487 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3d36fa94d196bcb8 with lease ID 0x3f13c4c04a138b28: from storage DS-0e71e882-6627-40e9-8cec-be2350e4c415 node DatanodeRegistration(127.0.0.1:46667, datanodeUuid=2754d510-d5cb-4ef8-8831-1bb4a8a6e0c9, infoPort=41107, infoSecurePort=0, ipcPort=44631, storageInfo=lv=-57;cid=testClusterID;nsid=1996869658;c=1733554313592), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T06:51:54,587 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b 2024-12-07T06:51:54,590 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/zookeeper_0, clientPort=60801, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T06:51:54,591 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60801 2024-12-07T06:51:54,591 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,593 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741825_1001 (size=7) 2024-12-07T06:51:54,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741825_1001 (size=7) 2024-12-07T06:51:54,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741825_1001 (size=7) 2024-12-07T06:51:54,607 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315 with version=8 2024-12-07T06:51:54,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40093/user/jenkins/test-data/bad93667-fd29-5b1e-514c-ac50d4f68313/hbase-staging 2024-12-07T06:51:54,610 INFO [Time-limited test {}] client.ConnectionUtils(128): master/61c02eafbb40:0 server-side Connection retries=45 2024-12-07T06:51:54,610 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,610 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,610 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T06:51:54,610 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,610 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T06:51:54,610 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T06:51:54,610 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T06:51:54,611 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36067 2024-12-07T06:51:54,612 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36067 connecting to ZooKeeper ensemble=127.0.0.1:60801 2024-12-07T06:51:54,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:360670x0, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T06:51:54,619 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36067-0x1018bb187ed0000 connected 2024-12-07T06:51:54,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,636 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,638 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:54,638 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315, hbase.cluster.distributed=false 2024-12-07T06:51:54,640 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T06:51:54,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36067 2024-12-07T06:51:54,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36067 2024-12-07T06:51:54,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36067 2024-12-07T06:51:54,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36067 2024-12-07T06:51:54,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36067 2024-12-07T06:51:54,658 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/61c02eafbb40:0 server-side Connection retries=45 2024-12-07T06:51:54,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,659 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T06:51:54,659 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T06:51:54,659 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T06:51:54,659 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T06:51:54,661 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44403 2024-12-07T06:51:54,662 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44403 connecting to ZooKeeper ensemble=127.0.0.1:60801 2024-12-07T06:51:54,663 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,665 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:444030x0, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T06:51:54,671 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44403-0x1018bb187ed0001 connected 2024-12-07T06:51:54,671 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:54,672 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T06:51:54,672 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T06:51:54,673 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T06:51:54,674 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T06:51:54,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44403 2024-12-07T06:51:54,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44403 2024-12-07T06:51:54,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44403 2024-12-07T06:51:54,676 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44403 2024-12-07T06:51:54,676 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44403 2024-12-07T06:51:54,692 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/61c02eafbb40:0 server-side Connection retries=45 2024-12-07T06:51:54,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,692 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T06:51:54,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T06:51:54,692 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T06:51:54,693 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T06:51:54,693 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40275 2024-12-07T06:51:54,694 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40275 connecting to ZooKeeper ensemble=127.0.0.1:60801 2024-12-07T06:51:54,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,697 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:402750x0, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T06:51:54,702 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:402750x0, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:54,702 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40275-0x1018bb187ed0002 connected 2024-12-07T06:51:54,702 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T06:51:54,703 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T06:51:54,704 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
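The ZKUtil lines above ("Set watcher on znode that does not yet exist, /hbase/running", "/hbase/master", "/hbase/acl") describe the standard ZooKeeper trick of registering a watch through an existence check: exists() returns null for an absent znode but still installs the watch, so the client is notified by a NodeCreated event later. A small illustrative sketch with the plain ZooKeeper client API (the connect string matches the log; the 30-second session timeout is an assumption):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    // Illustrative "set watcher on znode that does not yet exist" pattern.
    public class ZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            Watcher watcher = (WatchedEvent event) ->
                System.out.println("Event " + event.getType() + " on " + event.getPath());
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60801", 30_000, watcher);

            Stat stat = zk.exists("/hbase/master", watcher); // null here, watch is still registered
            if (stat == null) {
                System.out.println("/hbase/master not created yet; waiting for NodeCreated");
            }
        }
    }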
2024-12-07T06:51:54,705 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T06:51:54,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40275 2024-12-07T06:51:54,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40275 2024-12-07T06:51:54,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40275 2024-12-07T06:51:54,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40275 2024-12-07T06:51:54,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40275 2024-12-07T06:51:54,723 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/61c02eafbb40:0 server-side Connection retries=45 2024-12-07T06:51:54,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,723 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T06:51:54,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T06:51:54,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T06:51:54,724 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T06:51:54,724 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T06:51:54,724 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37067 2024-12-07T06:51:54,725 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37067 connecting to ZooKeeper ensemble=127.0.0.1:60801 2024-12-07T06:51:54,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,728 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370670x0, quorum=127.0.0.1:60801, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T06:51:54,732 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:54,732 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37067-0x1018bb187ed0003 connected 2024-12-07T06:51:54,732 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T06:51:54,733 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T06:51:54,733 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T06:51:54,734 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T06:51:54,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37067 2024-12-07T06:51:54,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37067 2024-12-07T06:51:54,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37067 2024-12-07T06:51:54,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37067 2024-12-07T06:51:54,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37067 2024-12-07T06:51:54,747 DEBUG [M:0;61c02eafbb40:36067 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;61c02eafbb40:36067 2024-12-07T06:51:54,748 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/61c02eafbb40,36067,1733554314609 2024-12-07T06:51:54,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:54,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:54,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:54,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:54,750 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36067-0x1018bb187ed0000, 
quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/61c02eafbb40,36067,1733554314609 2024-12-07T06:51:54,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T06:51:54,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T06:51:54,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T06:51:54,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,755 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T06:51:54,755 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/61c02eafbb40,36067,1733554314609 from backup master directory 2024-12-07T06:51:54,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/61c02eafbb40,36067,1733554314609 2024-12-07T06:51:54,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:54,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:54,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:54,757 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable 
HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T06:51:54,757 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=61c02eafbb40,36067,1733554314609 2024-12-07T06:51:54,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T06:51:54,763 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/hbase.id] with ID: 3463f44f-f7fa-46cf-9c93-04f893623a7c 2024-12-07T06:51:54,763 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/.tmp/hbase.id 2024-12-07T06:51:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741826_1002 (size=42) 2024-12-07T06:51:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741826_1002 (size=42) 2024-12-07T06:51:54,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741826_1002 (size=42) 2024-12-07T06:51:54,774 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/.tmp/hbase.id]:[hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/hbase.id] 2024-12-07T06:51:54,791 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T06:51:54,791 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T06:51:54,792 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
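The FSUtils lines above show the new active master publishing the cluster ID by writing it to a temporary file under .tmp and then moving it to its final hbase.id location, so readers never see a half-written file. A hedged sketch of that write-then-rename pattern with the Hadoop FileSystem API (the helper name and target directory are illustrative, not the actual FSUtils code):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative: publish small metadata atomically by writing a temp file and renaming it.
    public class ClusterIdPublishSketch {
        static void publishClusterId(FileSystem fs, Path rootDir, String clusterId) throws Exception {
            Path tmp = new Path(rootDir, ".tmp/hbase.id");
            Path dst = new Path(rootDir, "hbase.id");
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write(clusterId.getBytes(StandardCharsets.UTF_8));
            }
            // The rename is the publish step; on HDFS a rename within one namespace is atomic.
            if (!fs.rename(tmp, dst)) {
                throw new IllegalStateException("could not move " + tmp + " to " + dst);
            }
        }

        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            // Cluster ID value taken from the log; the target directory is a placeholder.
            publishClusterId(fs, new Path("/tmp/cluster-id-sketch"),
                "3463f44f-f7fa-46cf-9c93-04f893623a7c");
        }
    }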
2024-12-07T06:51:54,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741827_1003 (size=196) 2024-12-07T06:51:54,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741827_1003 (size=196) 2024-12-07T06:51:54,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741827_1003 (size=196) 2024-12-07T06:51:54,804 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T06:51:54,805 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T06:51:54,805 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T06:51:54,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is 
added to blk_1073741828_1004 (size=1189) 2024-12-07T06:51:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741828_1004 (size=1189) 2024-12-07T06:51:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741828_1004 (size=1189) 2024-12-07T06:51:54,817 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store 2024-12-07T06:51:54,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741829_1005 (size=34) 2024-12-07T06:51:54,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741829_1005 (size=34) 2024-12-07T06:51:54,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741829_1005 (size=34) 2024-12-07T06:51:54,827 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:54,827 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T06:51:54,827 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:54,827 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
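(Editorial sketch, not part of the captured log.) The MasterRegion bootstrap above prints the full descriptor of the internal 'master:store' table (families info, proc, rs, state with their versions, bloom filters, encodings, and block sizes). For orientation, the same kind of descriptor is expressed through the public HBase client API roughly as below; the table name is a placeholder and only a subset of the attributes shown in the log is set, since the real region lives in the reserved 'master' namespace and is created internally rather than by client code.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class StoreDescriptorSketch {
      public static TableDescriptor build() {
          // Mirrors the 'info' family from the log: VERSIONS=3, IN_MEMORY=true,
          // BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8KB.
          ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .setInMemory(true)
              .setBloomFilterType(BloomType.ROWCOL)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .setBlocksize(8 * 1024)
              .build();
          // 'example:store' is a placeholder table name for the sketch.
          return TableDescriptorBuilder.newBuilder(TableName.valueOf("example", "store"))
              .setColumnFamily(info)
              .build();
      }
  }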
2024-12-07T06:51:54,827 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T06:51:54,827 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:54,827 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:54,827 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733554314827Disabling compacts and flushes for region at 1733554314827Disabling writes for close at 1733554314827Writing region close event to WAL at 1733554314827Closed at 1733554314827 2024-12-07T06:51:54,828 WARN [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/.initializing 2024-12-07T06:51:54,829 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/WALs/61c02eafbb40,36067,1733554314609 2024-12-07T06:51:54,832 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C36067%2C1733554314609, suffix=, logDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/WALs/61c02eafbb40,36067,1733554314609, archiveDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/oldWALs, maxLogs=10 2024-12-07T06:51:54,833 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 61c02eafbb40%2C36067%2C1733554314609.1733554314833 2024-12-07T06:51:54,843 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/WALs/61c02eafbb40,36067,1733554314609/61c02eafbb40%2C36067%2C1733554314609.1733554314833 2024-12-07T06:51:54,844 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38701:38701),(127.0.0.1/127.0.0.1:41107:41107),(127.0.0.1/127.0.0.1:38599:38599)] 2024-12-07T06:51:54,845 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T06:51:54,845 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:54,845 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,845 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T06:51:54,849 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:54,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:54,850 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,851 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T06:51:54,851 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:54,852 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T06:51:54,852 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T06:51:54,854 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:54,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T06:51:54,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T06:51:54,856 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:54,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T06:51:54,857 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,858 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,858 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,860 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,860 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,860 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T06:51:54,862 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T06:51:54,864 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T06:51:54,865 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70460500, jitterRate=0.04994326829910278}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T06:51:54,866 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733554314845Initializing all the Stores at 1733554314847 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554314847Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554314847Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554314847Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554314847Cleaning up temporary data from old regions at 1733554314860 (+13 ms)Region opened successfully at 1733554314866 (+6 ms) 2024-12-07T06:51:54,866 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T06:51:54,870 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55fedf43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=61c02eafbb40/172.17.0.2:0 2024-12-07T06:51:54,871 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T06:51:54,871 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T06:51:54,871 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T06:51:54,871 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T06:51:54,872 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T06:51:54,873 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T06:51:54,873 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T06:51:54,875 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-07T06:51:54,876 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T06:51:54,877 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T06:51:54,877 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T06:51:54,878 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T06:51:54,881 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T06:51:54,881 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T06:51:54,882 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T06:51:54,883 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T06:51:54,884 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T06:51:54,886 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T06:51:54,888 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T06:51:54,889 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T06:51:54,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:54,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:54,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:54,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-07T06:51:54,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,894 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=61c02eafbb40,36067,1733554314609, sessionid=0x1018bb187ed0000, setting cluster-up flag (Was=false) 2024-12-07T06:51:54,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,907 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T06:51:54,908 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=61c02eafbb40,36067,1733554314609 2024-12-07T06:51:54,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:54,922 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T06:51:54,923 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=61c02eafbb40,36067,1733554314609 2024-12-07T06:51:54,925 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T06:51:54,927 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T06:51:54,928 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T06:51:54,928 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
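(Editorial sketch, not part of the captured log.) The StochasticLoadBalancer line above lists its cost functions and reports the "sum of multiplier of cost functions". The underlying idea is a weighted sum: each candidate cluster layout gets a normalized cost per function, scaled by that function's multiplier, and the balancer prefers the layout with the lowest total. The toy illustration below shows only that weighting idea and is not HBase's actual implementation.

  import java.util.List;
  import java.util.function.ToDoubleFunction;

  public class WeightedCostSketch {
      // Pairs a normalized cost function (0.0 = ideal, 1.0 = worst) with its multiplier.
      record Weighted<T>(double multiplier, ToDoubleFunction<T> cost) {}

      // Total cost of a candidate layout: sum of multiplier * normalized cost,
      // echoing the "sum of multiplier of cost functions" figure in the log line.
      static <T> double totalCost(T layout, List<Weighted<T>> functions) {
          return functions.stream()
              .mapToDouble(w -> w.multiplier() * w.cost().applyAsDouble(layout))
              .sum();
      }
  }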
2024-12-07T06:51:54,928 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 61c02eafbb40,36067,1733554314609 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T06:51:54,929 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/61c02eafbb40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T06:51:54,929 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/61c02eafbb40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T06:51:54,929 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/61c02eafbb40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T06:51:54,929 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/61c02eafbb40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T06:51:54,929 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/61c02eafbb40:0, corePoolSize=10, maxPoolSize=10 2024-12-07T06:51:54,930 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:54,930 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/61c02eafbb40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T06:51:54,930 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:54,930 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733554344930 2024-12-07T06:51:54,931 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T06:51:54,931 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T06:51:54,931 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T06:51:54,931 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T06:51:54,931 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T06:51:54,931 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T06:51:54,931 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:54,931 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T06:51:54,931 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T06:51:54,932 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T06:51:54,932 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T06:51:54,932 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T06:51:54,932 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T06:51:54,932 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T06:51:54,932 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.large.0-1733554314932,5,FailOnTimeoutGroup] 2024-12-07T06:51:54,933 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:54,933 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T06:51:54,934 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.small.0-1733554314932,5,FailOnTimeoutGroup] 2024-12-07T06:51:54,934 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:54,934 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T06:51:54,934 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:54,934 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:54,938 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(746): ClusterId : 3463f44f-f7fa-46cf-9c93-04f893623a7c 2024-12-07T06:51:54,938 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(746): ClusterId : 3463f44f-f7fa-46cf-9c93-04f893623a7c 2024-12-07T06:51:54,938 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(746): ClusterId : 3463f44f-f7fa-46cf-9c93-04f893623a7c 2024-12-07T06:51:54,938 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T06:51:54,938 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T06:51:54,938 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T06:51:54,943 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T06:51:54,943 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T06:51:54,943 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T06:51:54,943 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T06:51:54,943 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T06:51:54,943 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T06:51:54,945 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T06:51:54,946 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T06:51:54,946 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T06:51:54,946 DEBUG [RS:1;61c02eafbb40:40275 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37e76eab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=61c02eafbb40/172.17.0.2:0 2024-12-07T06:51:54,946 DEBUG [RS:2;61c02eafbb40:37067 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@292584a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=61c02eafbb40/172.17.0.2:0 2024-12-07T06:51:54,946 DEBUG [RS:0;61c02eafbb40:44403 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4086602b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=61c02eafbb40/172.17.0.2:0 2024-12-07T06:51:54,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741831_1007 (size=1321) 2024-12-07T06:51:54,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741831_1007 (size=1321) 2024-12-07T06:51:54,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741831_1007 (size=1321) 2024-12-07T06:51:54,957 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T06:51:54,958 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315 2024-12-07T06:51:54,966 DEBUG [RS:1;61c02eafbb40:40275 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;61c02eafbb40:40275 2024-12-07T06:51:54,966 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T06:51:54,966 INFO [RS:1;61c02eafbb40:40275 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T06:51:54,966 DEBUG [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T06:51:54,967 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(2659): reportForDuty to master=61c02eafbb40,36067,1733554314609 with port=40275, startcode=1733554314692 2024-12-07T06:51:54,967 DEBUG [RS:1;61c02eafbb40:40275 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T06:51:54,968 DEBUG [RS:2;61c02eafbb40:37067 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;61c02eafbb40:37067 2024-12-07T06:51:54,968 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T06:51:54,969 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T06:51:54,969 DEBUG [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T06:51:54,969 DEBUG [RS:0;61c02eafbb40:44403 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;61c02eafbb40:44403 2024-12-07T06:51:54,969 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T06:51:54,969 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T06:51:54,969 DEBUG [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T06:51:54,969 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(2659): reportForDuty to master=61c02eafbb40,36067,1733554314609 with port=37067, startcode=1733554314723 2024-12-07T06:51:54,970 DEBUG [RS:2;61c02eafbb40:37067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T06:51:54,970 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(2659): reportForDuty to master=61c02eafbb40,36067,1733554314609 with port=44403, startcode=1733554314658 2024-12-07T06:51:54,970 DEBUG [RS:0;61c02eafbb40:44403 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T06:51:54,973 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39143, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T06:51:54,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741832_1008 (size=32) 2024-12-07T06:51:54,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741832_1008 (size=32) 2024-12-07T06:51:54,974 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36067 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 61c02eafbb40,40275,1733554314692 2024-12-07T06:51:54,974 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36067 {}] master.ServerManager(517): Registering regionserver=61c02eafbb40,40275,1733554314692 2024-12-07T06:51:54,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741832_1008 (size=32) 
2024-12-07T06:51:54,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:54,975 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36503, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T06:51:54,975 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37079, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T06:51:54,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T06:51:54,978 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36067 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 61c02eafbb40,44403,1733554314658 2024-12-07T06:51:54,978 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36067 {}] master.ServerManager(517): Registering regionserver=61c02eafbb40,44403,1733554314658 2024-12-07T06:51:54,978 DEBUG [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315 2024-12-07T06:51:54,978 DEBUG [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37531 2024-12-07T06:51:54,979 DEBUG [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T06:51:54,981 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T06:51:54,981 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36067 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 61c02eafbb40,37067,1733554314723 2024-12-07T06:51:54,981 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:54,981 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36067 {}] master.ServerManager(517): Registering regionserver=61c02eafbb40,37067,1733554314723 2024-12-07T06:51:54,981 DEBUG [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315 2024-12-07T06:51:54,982 DEBUG [RS:0;61c02eafbb40:44403 {}] 
regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37531 2024-12-07T06:51:54,982 DEBUG [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T06:51:54,982 DEBUG [RS:1;61c02eafbb40:40275 {}] zookeeper.ZKUtil(111): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/61c02eafbb40,40275,1733554314692 2024-12-07T06:51:54,982 WARN [RS:1;61c02eafbb40:40275 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T06:51:54,982 INFO [RS:1;61c02eafbb40:40275 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T06:51:54,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:54,982 DEBUG [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,40275,1733554314692 2024-12-07T06:51:54,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T06:51:54,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T06:51:54,984 DEBUG [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315 2024-12-07T06:51:54,984 DEBUG [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37531 2024-12-07T06:51:54,984 DEBUG [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T06:51:54,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T06:51:54,984 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:54,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:54,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T06:51:54,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T06:51:54,987 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:54,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:54,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T06:51:54,989 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T06:51:54,989 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:54,989 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [61c02eafbb40,44403,1733554314658] 2024-12-07T06:51:54,989 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [61c02eafbb40,40275,1733554314692] 2024-12-07T06:51:54,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T06:51:54,990 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:54,990 DEBUG [RS:0;61c02eafbb40:44403 {}] zookeeper.ZKUtil(111): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/61c02eafbb40,44403,1733554314658 2024-12-07T06:51:54,990 WARN [RS:0;61c02eafbb40:44403 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T06:51:54,990 INFO [RS:0;61c02eafbb40:44403 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T06:51:54,990 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T06:51:54,991 DEBUG [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,44403,1733554314658 2024-12-07T06:51:54,991 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [61c02eafbb40,37067,1733554314723] 2024-12-07T06:51:54,991 DEBUG [RS:2;61c02eafbb40:37067 {}] zookeeper.ZKUtil(111): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/61c02eafbb40,37067,1733554314723 2024-12-07T06:51:54,991 WARN [RS:2;61c02eafbb40:37067 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T06:51:54,991 INFO [RS:2;61c02eafbb40:37067 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T06:51:54,991 DEBUG [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,37067,1733554314723 2024-12-07T06:51:54,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740 2024-12-07T06:51:54,992 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T06:51:54,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740 2024-12-07T06:51:54,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T06:51:54,994 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T06:51:54,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T06:51:54,995 INFO [RS:1;61c02eafbb40:40275 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T06:51:54,995 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
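The MemStoreFlusher record above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. A minimal sketch of how these two figures typically relate, assuming the default fractions (0.4 of heap for the global limit, 0.95 of that for the low mark); the 2200 MB heap below is back-computed from the logged value and is purely illustrative, not read from the test configuration:

// Sketch: how the two MemStoreFlusher figures in the record above are typically related.
// Assumptions (not taken from the log): default hbase.regionserver.global.memstore.size = 0.4
// and default hbase.regionserver.global.memstore.size.lower.limit = 0.95; the heap size here
// is back-computed from the logged 880 M and is only illustrative.
public class MemStoreLimits {
    public static void main(String[] args) {
        double heapMB = 2200.0;              // hypothetical -Xmx of the test JVM
        double globalFraction = 0.4;         // hbase.regionserver.global.memstore.size (default)
        double lowerLimitFraction = 0.95;    // hbase.regionserver.global.memstore.size.lower.limit (default)

        double globalLimit = heapMB * globalFraction;        // ~880 MB, matches the log
        double lowMark = globalLimit * lowerLimitFraction;    // ~836 MB, matches the log

        System.out.printf("globalMemStoreLimit=%.0f M, lowMark=%.0f M%n", globalLimit, lowMark);
    }
}

The 836/880 ratio in the record matches the default 0.95 lower-limit factor exactly.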
2024-12-07T06:51:54,995 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T06:51:54,996 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T06:51:54,997 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T06:51:54,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T06:51:54,999 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T06:51:54,999 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T06:51:54,999 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T06:51:54,999 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,000 INFO [RS:2;61c02eafbb40:37067 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,000 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/61c02eafbb40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,000 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,001 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,001 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,001 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:55,001 DEBUG [RS:1;61c02eafbb40:40275 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:55,002 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T06:51:55,002 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
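Records like the executor-service lines above follow one fixed layout: ISO timestamp, level, [thread {}], source class(line): message. A small parser is enough to pull fields out of a capture like this one; the regular expression below is inferred from the lines shown here and is an assumption about the layout, not an official Log4j pattern:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch: split one captured record into timestamp / level / thread / source / message.
// The pattern is inferred from this log's lines; records that carry metadata inside the
// braces (e.g. {event_type=..., pid=...}) would need a looser pattern than the literal "{}".
public class LogRecordParser {
    private static final Pattern RECORD = Pattern.compile(
        "^(\\S+)\\s+(TRACE|DEBUG|INFO|WARN|ERROR)\\s+\\[([^\\]]+?)\\s*\\{\\}\\]\\s+([\\w.$]+\\(\\d+\\)):\\s+(.*)$");

    public static void main(String[] args) {
        String line = "2024-12-07T06:51:55,000 DEBUG [RS:1;61c02eafbb40:40275 {}] "
            + "executor.ExecutorService(95): Starting executor service "
            + "name=RS_OPEN_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1";
        Matcher m = RECORD.matcher(line);
        if (m.matches()) {
            System.out.println("time   = " + m.group(1));
            System.out.println("level  = " + m.group(2));
            System.out.println("thread = " + m.group(3));
            System.out.println("source = " + m.group(4));
            System.out.println("msg    = " + m.group(5));
        }
    }
}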
2024-12-07T06:51:55,002 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,002 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,002 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/61c02eafbb40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:55,003 DEBUG [RS:2;61c02eafbb40:37067 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:55,005 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T06:51:55,005 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61087578, jitterRate=-0.08972415328025818}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T06:51:55,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 
1733554314975Initializing all the Stores at 1733554314977 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554314977Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554314978 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554314978Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554314978Cleaning up temporary data from old regions at 1733554314995 (+17 ms)Region opened successfully at 1733554315006 (+11 ms) 2024-12-07T06:51:55,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T06:51:55,007 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T06:51:55,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T06:51:55,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T06:51:55,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T06:51:55,008 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T06:51:55,009 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,009 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,009 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,009 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,009 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
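The "Opened 1588230740" record above (and the matching RS_OPEN_META one later in the log) reports desiredMaxFileSize together with a jitterRate, and the two are related by desiredMaxFileSize ≈ base × (1 + jitterRate). The sketch below checks that arithmetic; the 64 MB base is an assumption inferred from the logged numbers, not a value read from this test's configuration:

// Sketch: reconstructing desiredMaxFileSize from an assumed base max file size and the
// logged jitterRate. The 64 MB base is inferred from the two logged figures, not taken
// from any configuration shown in the log.
public class SplitJitterCheck {
    public static void main(String[] args) {
        long baseMaxFileSize = 64L * 1024 * 1024; // assumed hbase.hregion.max.filesize for this test

        double jitter1 = -0.08972415328025818;    // from the PEWorker-1 open above
        double jitter2 = -0.05230402946472168;    // from the RS_OPEN_META open later in the log

        System.out.println((long) (baseMaxFileSize * (1 + jitter1))); // ≈ 61087578, as logged
        System.out.println((long) (baseMaxFileSize * (1 + jitter2))); // ≈ 63598800, as logged
    }
}

Both results land within a byte of the logged 61087578 and 63598800, which is consistent with a 64 MB base plus per-region jitter.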
2024-12-07T06:51:55,009 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40275,1733554314692-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T06:51:55,012 INFO [RS:0;61c02eafbb40:44403 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T06:51:55,012 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,012 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,013 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,013 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,013 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,013 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,013 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T06:51:55,013 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,37067,1733554314723-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T06:51:55,013 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T06:51:55,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733554315007Disabling compacts and flushes for region at 1733554315007Disabling writes for close at 1733554315007Writing region close event to WAL at 1733554315013 (+6 ms)Closed at 1733554315013 2024-12-07T06:51:55,014 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T06:51:55,014 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-07T06:51:55,014 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/61c02eafbb40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/61c02eafbb40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:55,015 DEBUG [RS:0;61c02eafbb40:44403 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T06:51:55,017 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T06:51:55,017 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T06:51:55,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T06:51:55,019 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T06:51:55,021 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T06:51:55,022 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,022 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,022 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,022 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,022 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,022 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,44403,1733554314658-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T06:51:55,030 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T06:51:55,030 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,40275,1733554314692-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,030 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,030 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.Replication(171): 61c02eafbb40,40275,1733554314692 started 2024-12-07T06:51:55,032 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T06:51:55,032 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,37067,1733554314723-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,032 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,032 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.Replication(171): 61c02eafbb40,37067,1733554314723 started 2024-12-07T06:51:55,045 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
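The chore registrations above mix units: most periods are logged in MILLISECONDS, while the MobFileCleanerChore is logged in SECONDS. The following sketch simply normalizes the logged periods to a common unit; all values are copied from the records above:

import java.util.concurrent.TimeUnit;

// Sketch: the chore periods from the ScheduledChore records above, converted to one unit
// each so the schedule is easier to read. Nothing here is configured or measured.
public class ChorePeriods {
    public static void main(String[] args) {
        System.out.println("CompactionChecker      : " + TimeUnit.MILLISECONDS.toSeconds(1000) + " s");
        System.out.println("MemstoreFlusherChore   : " + TimeUnit.MILLISECONDS.toSeconds(1000) + " s");
        System.out.println("nonceCleaner           : " + TimeUnit.MILLISECONDS.toMinutes(360000) + " min");
        System.out.println("BrokenStoreFileCleaner : " + TimeUnit.MILLISECONDS.toHours(21600000) + " h");
        System.out.println("MobFileCleanerChore    : " + TimeUnit.SECONDS.toHours(86400) + " h");
    }
}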
2024-12-07T06:51:55,045 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(1482): Serving as 61c02eafbb40,40275,1733554314692, RpcServer on 61c02eafbb40/172.17.0.2:40275, sessionid=0x1018bb187ed0002 2024-12-07T06:51:55,045 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T06:51:55,045 DEBUG [RS:1;61c02eafbb40:40275 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 61c02eafbb40,40275,1733554314692 2024-12-07T06:51:55,045 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,40275,1733554314692' 2024-12-07T06:51:55,045 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T06:51:55,046 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T06:51:55,046 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,046 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(1482): Serving as 61c02eafbb40,37067,1733554314723, RpcServer on 61c02eafbb40/172.17.0.2:37067, sessionid=0x1018bb187ed0003 2024-12-07T06:51:55,047 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T06:51:55,047 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T06:51:55,047 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T06:51:55,047 DEBUG [RS:2;61c02eafbb40:37067 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 61c02eafbb40,37067,1733554314723 2024-12-07T06:51:55,047 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,37067,1733554314723' 2024-12-07T06:51:55,047 DEBUG [RS:1;61c02eafbb40:40275 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 61c02eafbb40,40275,1733554314692 2024-12-07T06:51:55,047 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T06:51:55,047 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,40275,1733554314692' 2024-12-07T06:51:55,047 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T06:51:55,047 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T06:51:55,047 DEBUG [RS:1;61c02eafbb40:40275 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T06:51:55,048 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T06:51:55,048 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T06:51:55,048 DEBUG [RS:1;61c02eafbb40:40275 {}] 
procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T06:51:55,048 DEBUG [RS:2;61c02eafbb40:37067 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 61c02eafbb40,37067,1733554314723 2024-12-07T06:51:55,048 INFO [RS:1;61c02eafbb40:40275 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T06:51:55,048 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,37067,1733554314723' 2024-12-07T06:51:55,048 INFO [RS:1;61c02eafbb40:40275 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T06:51:55,048 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T06:51:55,049 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T06:51:55,049 DEBUG [RS:2;61c02eafbb40:37067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T06:51:55,049 INFO [RS:2;61c02eafbb40:37067 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T06:51:55,049 INFO [RS:2;61c02eafbb40:37067 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T06:51:55,050 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T06:51:55,050 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,44403,1733554314658-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,050 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,050 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.Replication(171): 61c02eafbb40,44403,1733554314658 started 2024-12-07T06:51:55,072 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T06:51:55,072 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(1482): Serving as 61c02eafbb40,44403,1733554314658, RpcServer on 61c02eafbb40/172.17.0.2:44403, sessionid=0x1018bb187ed0001 2024-12-07T06:51:55,072 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T06:51:55,072 DEBUG [RS:0;61c02eafbb40:44403 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 61c02eafbb40,44403,1733554314658 2024-12-07T06:51:55,072 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,44403,1733554314658' 2024-12-07T06:51:55,072 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T06:51:55,073 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T06:51:55,073 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T06:51:55,074 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T06:51:55,074 DEBUG [RS:0;61c02eafbb40:44403 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 61c02eafbb40,44403,1733554314658 2024-12-07T06:51:55,074 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '61c02eafbb40,44403,1733554314658' 2024-12-07T06:51:55,074 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T06:51:55,074 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T06:51:55,075 DEBUG [RS:0;61c02eafbb40:44403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T06:51:55,075 INFO [RS:0;61c02eafbb40:44403 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T06:51:55,075 INFO [RS:0;61c02eafbb40:44403 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-07T06:51:55,151 INFO [RS:1;61c02eafbb40:40275 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C40275%2C1733554314692, suffix=, logDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,40275,1733554314692, archiveDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/oldWALs, maxLogs=32 2024-12-07T06:51:55,152 INFO [RS:2;61c02eafbb40:37067 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C37067%2C1733554314723, suffix=, logDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,37067,1733554314723, archiveDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/oldWALs, maxLogs=32 2024-12-07T06:51:55,153 INFO [RS:1;61c02eafbb40:40275 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 61c02eafbb40%2C40275%2C1733554314692.1733554315153 2024-12-07T06:51:55,153 INFO [RS:2;61c02eafbb40:37067 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 61c02eafbb40%2C37067%2C1733554314723.1733554315153 2024-12-07T06:51:55,162 INFO [RS:1;61c02eafbb40:40275 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,40275,1733554314692/61c02eafbb40%2C40275%2C1733554314692.1733554315153 2024-12-07T06:51:55,163 INFO [RS:2;61c02eafbb40:37067 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,37067,1733554314723/61c02eafbb40%2C37067%2C1733554314723.1733554315153 2024-12-07T06:51:55,165 DEBUG [RS:1;61c02eafbb40:40275 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38599:38599),(127.0.0.1/127.0.0.1:38701:38701),(127.0.0.1/127.0.0.1:41107:41107)] 2024-12-07T06:51:55,166 DEBUG [RS:2;61c02eafbb40:37067 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41107:41107),(127.0.0.1/127.0.0.1:38599:38599),(127.0.0.1/127.0.0.1:38701:38701)] 2024-12-07T06:51:55,172 DEBUG [61c02eafbb40:36067 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=2, allServersCount=2 2024-12-07T06:51:55,172 DEBUG [61c02eafbb40:36067 {}] balancer.BalancerClusterState(204): Hosts are {61c02eafbb40=0} racks are {/default-rack=0} 2024-12-07T06:51:55,174 DEBUG [61c02eafbb40:36067 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T06:51:55,174 DEBUG [61c02eafbb40:36067 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T06:51:55,174 DEBUG [61c02eafbb40:36067 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T06:51:55,174 DEBUG [61c02eafbb40:36067 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T06:51:55,174 INFO [61c02eafbb40:36067 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T06:51:55,174 INFO [61c02eafbb40:36067 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T06:51:55,174 DEBUG [61c02eafbb40:36067 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T06:51:55,175 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=61c02eafbb40,40275,1733554314692 2024-12-07T06:51:55,177 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 
location in ZooKeeper as 61c02eafbb40,40275,1733554314692, state=OPENING 2024-12-07T06:51:55,177 INFO [RS:0;61c02eafbb40:44403 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C44403%2C1733554314658, suffix=, logDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,44403,1733554314658, archiveDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/oldWALs, maxLogs=32 2024-12-07T06:51:55,178 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T06:51:55,179 INFO [RS:0;61c02eafbb40:44403 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 61c02eafbb40%2C44403%2C1733554314658.1733554315178 2024-12-07T06:51:55,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:55,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:55,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:55,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:55,184 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:55,185 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:55,185 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:55,185 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T06:51:55,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=61c02eafbb40,40275,1733554314692}] 2024-12-07T06:51:55,186 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:55,188 INFO [RS:0;61c02eafbb40:44403 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,44403,1733554314658/61c02eafbb40%2C44403%2C1733554314658.1733554315178 2024-12-07T06:51:55,193 DEBUG [RS:0;61c02eafbb40:44403 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41107:41107),(127.0.0.1/127.0.0.1:38599:38599),(127.0.0.1/127.0.0.1:38701:38701)] 2024-12-07T06:51:55,340 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): 
Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T06:51:55,341 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50851, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T06:51:55,347 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T06:51:55,347 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T06:51:55,350 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=61c02eafbb40%2C40275%2C1733554314692.meta, suffix=.meta, logDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,40275,1733554314692, archiveDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/oldWALs, maxLogs=32 2024-12-07T06:51:55,351 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 61c02eafbb40%2C40275%2C1733554314692.meta.1733554315351.meta 2024-12-07T06:51:55,360 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/WALs/61c02eafbb40,40275,1733554314692/61c02eafbb40%2C40275%2C1733554314692.meta.1733554315351.meta 2024-12-07T06:51:55,363 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38701:38701),(127.0.0.1/127.0.0.1:41107:41107),(127.0.0.1/127.0.0.1:38599:38599)] 2024-12-07T06:51:55,364 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T06:51:55,364 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T06:51:55,364 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T06:51:55,364 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
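The "WAL configuration" records above report blocksize=256 MB and rollsize=128 MB for each WAL. A minimal sketch of the usual relationship between the two, assuming the default log-roll multiplier of 0.5; the multiplier is an assumption, only the 256 MB block size comes from the log:

// Sketch: relating the logged WAL blocksize to the logged rollsize.
// Assumption: a log-roll multiplier of 0.5 (the common default) is in effect.
public class WalRollSize {
    public static void main(String[] args) {
        long blockSizeBytes = 256L * 1024 * 1024; // "blocksize=256 MB" from the WAL configuration records
        double rollMultiplier = 0.5;              // assumed default hbase.regionserver.logroll.multiplier

        long rollSize = (long) (blockSizeBytes * rollMultiplier);
        System.out.println("rollsize = " + (rollSize / (1024 * 1024)) + " MB"); // 128 MB, as logged
    }
}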
2024-12-07T06:51:55,364 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T06:51:55,365 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:55,365 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T06:51:55,365 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T06:51:55,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T06:51:55,368 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T06:51:55,368 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:55,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:55,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T06:51:55,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T06:51:55,370 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:55,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:55,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T06:51:55,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T06:51:55,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:55,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T06:51:55,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T06:51:55,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T06:51:55,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:55,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T06:51:55,374 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T06:51:55,375 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740 2024-12-07T06:51:55,376 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740 2024-12-07T06:51:55,378 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T06:51:55,378 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T06:51:55,378 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T06:51:55,380 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T06:51:55,381 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63598800, jitterRate=-0.05230402946472168}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T06:51:55,381 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T06:51:55,382 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733554315365Writing region info on filesystem at 1733554315365Initializing all the Stores at 1733554315366 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554315366Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554315367 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554315367Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733554315367Cleaning up temporary data from old regions at 1733554315378 (+11 ms)Running coprocessor post-open hooks at 1733554315381 (+3 ms)Region opened successfully at 1733554315382 (+1 ms) 2024-12-07T06:51:55,384 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733554315339 2024-12-07T06:51:55,387 DEBUG [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T06:51:55,387 INFO [RS_OPEN_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T06:51:55,388 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=61c02eafbb40,40275,1733554314692 2024-12-07T06:51:55,390 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 61c02eafbb40,40275,1733554314692, state=OPEN 2024-12-07T06:51:55,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T06:51:55,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T06:51:55,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T06:51:55,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T06:51:55,391 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=61c02eafbb40,40275,1733554314692 2024-12-07T06:51:55,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:55,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:55,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:55,392 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T06:51:55,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T06:51:55,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=61c02eafbb40,40275,1733554314692 in 206 msec 2024-12-07T06:51:55,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T06:51:55,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 378 msec 2024-12-07T06:51:55,401 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T06:51:55,401 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T06:51:55,402 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T06:51:55,402 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=61c02eafbb40,40275,1733554314692, seqNum=-1] 2024-12-07T06:51:55,402 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T06:51:55,404 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47673, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T06:51:55,412 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 484 msec 2024-12-07T06:51:55,412 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733554315412, completionTime=-1 2024-12-07T06:51:55,412 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T06:51:55,412 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
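The master above waits for "expected min=3 server(s)" before joining the cluster; a test that produces this startup sequence typically brings the minicluster up along the following lines. This is a sketch assuming the hbase-testing-util HBaseTestingUtil API (the class the later stack traces reference); the exact TestHBaseWalOnEC setup code is not visible in this log.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.BeforeClass;

    public class MiniClusterStartupSketch {
      // Shared test utility; constructing it does not start any processes yet.
      static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // One master plus three region servers, matching the "count=3" wait logged above.
        UTIL.startMiniCluster(3);
      }
    }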
2024-12-07T06:51:55,414 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T06:51:55,414 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733554375414 2024-12-07T06:51:55,414 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733554435414 2024-12-07T06:51:55,414 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-07T06:51:55,415 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,36067,1733554314609-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,415 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,36067,1733554314609-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,415 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,36067,1733554314609-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,415 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-61c02eafbb40:36067, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,415 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,415 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,417 DEBUG [master/61c02eafbb40:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T06:51:55,420 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.663sec 2024-12-07T06:51:55,420 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T06:51:55,420 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T06:51:55,420 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T06:51:55,420 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T06:51:55,420 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T06:51:55,420 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,36067,1733554314609-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
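The chore registrations above are emitted by ChoreService when a ScheduledChore is scheduled. For reference, a user-defined chore is declared and scheduled roughly as below; this is illustrative only, and ExampleChore with its 60-second period is made up rather than taken from this log.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    class ExampleChore extends ScheduledChore {
      ExampleChore(Stoppable stopper) {
        super("ExampleChore", stopper, 60000); // name, stopper, period in milliseconds
      }

      @Override
      protected void chore() {
        // periodic work; the ChoreService invokes this once per period
      }
    }

    // Scheduling it produces a "Chore ScheduledChore name=..., period=..., is enabled." line like those above:
    // new ChoreService("example-prefix").scheduleChore(new ExampleChore(stopper));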
2024-12-07T06:51:55,420 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,36067,1733554314609-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T06:51:55,423 DEBUG [master/61c02eafbb40:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T06:51:55,423 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T06:51:55,423 INFO [master/61c02eafbb40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=61c02eafbb40,36067,1733554314609-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T06:51:55,438 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1632ace3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T06:51:55,438 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 61c02eafbb40,36067,-1 for getting cluster id 2024-12-07T06:51:55,438 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T06:51:55,440 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3463f44f-f7fa-46cf-9c93-04f893623a7c' 2024-12-07T06:51:55,441 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T06:51:55,441 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3463f44f-f7fa-46cf-9c93-04f893623a7c" 2024-12-07T06:51:55,441 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28fc786c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T06:51:55,441 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [61c02eafbb40,36067,-1] 2024-12-07T06:51:55,441 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T06:51:55,442 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:55,444 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55060, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T06:51:55,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4efea295, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T06:51:55,445 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T06:51:55,447 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=61c02eafbb40,40275,1733554314692, seqNum=-1] 2024-12-07T06:51:55,447 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T06:51:55,449 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T06:51:55,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=61c02eafbb40,36067,1733554314609 2024-12-07T06:51:55,452 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T06:51:55,454 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 61c02eafbb40,36067,1733554314609 2024-12-07T06:51:55,454 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@db2192d 2024-12-07T06:51:55,454 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T06:51:55,456 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55072, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T06:51:55,457 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T06:51:55,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-07T06:51:55,461 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T06:51:55,461 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:55,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-07T06:51:55,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T06:51:55,463 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T06:51:55,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741837_1013 (size=392) 
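The create request logged above ('TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single family 'cf' using default attributes) corresponds to a client-side call along these lines. A minimal sketch assuming the standard Admin/TableDescriptorBuilder API, with connection setup elided.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    class CreateTableSketch {
      static void createTestTable(Admin admin) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            .setRegionReplication(1)                                 // TABLE_ATTRIBUTES => REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // family 'cf' with default attributes
            .build();
        admin.createTable(td); // stored by the master as the CreateTableProcedure (pid=4) seen above
      }
    }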
2024-12-07T06:51:55,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741837_1013 (size=392) 2024-12-07T06:51:55,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741837_1013 (size=392) 2024-12-07T06:51:55,477 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => caa79521c82026f797a406ad32959ce2, NAME => 'TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315 2024-12-07T06:51:55,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741838_1014 (size=51) 2024-12-07T06:51:55,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741838_1014 (size=51) 2024-12-07T06:51:55,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741838_1014 (size=51) 2024-12-07T06:51:55,488 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:55,488 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing caa79521c82026f797a406ad32959ce2, disabling compactions & flushes 2024-12-07T06:51:55,488 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 2024-12-07T06:51:55,488 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 2024-12-07T06:51:55,488 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. after waiting 0 ms 2024-12-07T06:51:55,488 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 2024-12-07T06:51:55,488 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 
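The region descriptor created above carries METADATA {'hbase.store.file-tracker.impl' => 'DEFAULT'}. When a non-default store file tracker is wanted, that property can be set per table on the descriptor, roughly as in this fragment (a sketch reusing the imports of the create-table example above; "FILE" names the file-based tracker, while "DEFAULT", as in this log, keeps the classic behaviour).

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setValue("hbase.store.file-tracker.impl", "DEFAULT") // or "FILE" for the file-based tracker
        .build();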
2024-12-07T06:51:55,488 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for caa79521c82026f797a406ad32959ce2: Waiting for close lock at 1733554315488Disabling compacts and flushes for region at 1733554315488Disabling writes for close at 1733554315488Writing region close event to WAL at 1733554315488Closed at 1733554315488 2024-12-07T06:51:55,490 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T06:51:55,491 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733554315490"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733554315490"}]},"ts":"1733554315490"} 2024-12-07T06:51:55,494 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-07T06:51:55,495 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T06:51:55,496 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733554315496"}]},"ts":"1733554315496"} 2024-12-07T06:51:55,498 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-07T06:51:55,499 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {61c02eafbb40=0} racks are {/default-rack=0} 2024-12-07T06:51:55,500 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T06:51:55,500 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T06:51:55,500 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T06:51:55,500 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T06:51:55,500 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T06:51:55,500 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T06:51:55,500 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T06:51:55,500 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T06:51:55,500 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T06:51:55,500 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T06:51:55,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=caa79521c82026f797a406ad32959ce2, ASSIGN}] 2024-12-07T06:51:55,502 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=caa79521c82026f797a406ad32959ce2, ASSIGN 2024-12-07T06:51:55,504 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=caa79521c82026f797a406ad32959ce2, ASSIGN; state=OFFLINE, location=61c02eafbb40,44403,1733554314658; forceNewPlan=false, retain=false 2024-12-07T06:51:55,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T06:51:55,654 INFO [61c02eafbb40:36067 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T06:51:55,655 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=caa79521c82026f797a406ad32959ce2, regionState=OPENING, regionLocation=61c02eafbb40,44403,1733554314658 2024-12-07T06:51:55,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=caa79521c82026f797a406ad32959ce2, ASSIGN because future has completed 2024-12-07T06:51:55,659 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure caa79521c82026f797a406ad32959ce2, server=61c02eafbb40,44403,1733554314658}] 2024-12-07T06:51:55,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T06:51:55,814 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T06:51:55,816 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59029, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T06:51:55,820 INFO [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 
2024-12-07T06:51:55,820 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => caa79521c82026f797a406ad32959ce2, NAME => 'TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2.', STARTKEY => '', ENDKEY => ''} 2024-12-07T06:51:55,821 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,821 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T06:51:55,821 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,821 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,823 INFO [StoreOpener-caa79521c82026f797a406ad32959ce2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,824 INFO [StoreOpener-caa79521c82026f797a406ad32959ce2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region caa79521c82026f797a406ad32959ce2 columnFamilyName cf 2024-12-07T06:51:55,825 DEBUG [StoreOpener-caa79521c82026f797a406ad32959ce2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T06:51:55,825 INFO [StoreOpener-caa79521c82026f797a406ad32959ce2-1 {}] regionserver.HStore(327): Store=caa79521c82026f797a406ad32959ce2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T06:51:55,825 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,826 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/default/TestHBaseWalOnEC/caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,827 DEBUG 
[RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/default/TestHBaseWalOnEC/caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,827 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,827 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,829 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,832 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/default/TestHBaseWalOnEC/caa79521c82026f797a406ad32959ce2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T06:51:55,833 INFO [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened caa79521c82026f797a406ad32959ce2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72661319, jitterRate=0.08273802697658539}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T06:51:55,833 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:55,834 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for caa79521c82026f797a406ad32959ce2: Running coprocessor pre-open hook at 1733554315821Writing region info on filesystem at 1733554315821Initializing all the Stores at 1733554315822 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733554315822Cleaning up temporary data from old regions at 1733554315827 (+5 ms)Running coprocessor post-open hooks at 1733554315833 (+6 ms)Region opened successfully at 1733554315834 (+1 ms) 2024-12-07T06:51:55,835 INFO [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2., pid=6, masterSystemTime=1733554315813 2024-12-07T06:51:55,839 DEBUG [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 2024-12-07T06:51:55,839 INFO [RS_OPEN_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 
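Once the create procedure finishes (below), the test waits for all regions of TestHBaseWalOnEC to be assigned, which shows up as the HBaseTestingUtil "Waiting until all regions ... get assigned" lines that follow. On the client side that wait is typically a single call, sketched here assuming the UTIL instance and imports from the earlier sketches.

    // Blocks until every region of the table is assigned; the log below reports its default
    // timeout of 60000ms.
    UTIL.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));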
2024-12-07T06:51:55,840 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=caa79521c82026f797a406ad32959ce2, regionState=OPEN, openSeqNum=2, regionLocation=61c02eafbb40,44403,1733554314658 2024-12-07T06:51:55,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure caa79521c82026f797a406ad32959ce2, server=61c02eafbb40,44403,1733554314658 because future has completed 2024-12-07T06:51:55,851 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T06:51:55,851 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure caa79521c82026f797a406ad32959ce2, server=61c02eafbb40,44403,1733554314658 in 188 msec 2024-12-07T06:51:55,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T06:51:55,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=caa79521c82026f797a406ad32959ce2, ASSIGN in 351 msec 2024-12-07T06:51:55,857 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T06:51:55,857 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733554315857"}]},"ts":"1733554315857"} 2024-12-07T06:51:55,861 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-07T06:51:55,862 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T06:51:55,865 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 405 msec 2024-12-07T06:51:56,037 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T06:51:56,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T06:51:56,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T06:51:56,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T06:51:56,091 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T06:51:56,092 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. 
Timeout = 60000ms 2024-12-07T06:51:56,092 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T06:51:56,092 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T06:51:56,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-07T06:51:56,097 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T06:51:56,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-07T06:51:56,101 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2., hostname=61c02eafbb40,44403,1733554314658, seqNum=2] 2024-12-07T06:51:56,102 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T06:51:56,104 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T06:51:56,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-07T06:51:56,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-07T06:51:56,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T06:51:56,112 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-07T06:51:56,114 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T06:51:56,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T06:51:56,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T06:51:56,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44403 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-07T06:51:56,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 
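The single-cell write and the flush recorded above and below (a Put with key row/cf:cq at 1733554316105, then FlushTableProcedure pid=7, which persists a ~4.7 K HFile) map to client calls roughly like these. A sketch that assumes an open Connection; the value bytes ("value" here) are a placeholder, since they are not recoverable from the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class WriteAndFlushSketch {
      static void writeAndFlush(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
          // One cell: row "row", family "cf", qualifier "cq" (placeholder value).
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Triggers the table flush procedure seen above, writing the memstore out to an HFile.
          admin.flush(tn);
        }
      }
    }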
2024-12-07T06:51:56,270 INFO [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing caa79521c82026f797a406ad32959ce2 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-07T06:51:56,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/default/TestHBaseWalOnEC/caa79521c82026f797a406ad32959ce2/.tmp/cf/c919c58586fe4d6f9543be7bf4e47f72 is 36, key is row/cf:cq/1733554316105/Put/seqid=0 2024-12-07T06:51:56,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741839_1015 (size=4787) 2024-12-07T06:51:56,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741839_1015 (size=4787) 2024-12-07T06:51:56,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741839_1015 (size=4787) 2024-12-07T06:51:56,299 INFO [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/default/TestHBaseWalOnEC/caa79521c82026f797a406ad32959ce2/.tmp/cf/c919c58586fe4d6f9543be7bf4e47f72 2024-12-07T06:51:56,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/default/TestHBaseWalOnEC/caa79521c82026f797a406ad32959ce2/.tmp/cf/c919c58586fe4d6f9543be7bf4e47f72 as hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/default/TestHBaseWalOnEC/caa79521c82026f797a406ad32959ce2/cf/c919c58586fe4d6f9543be7bf4e47f72 2024-12-07T06:51:56,316 INFO [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/default/TestHBaseWalOnEC/caa79521c82026f797a406ad32959ce2/cf/c919c58586fe4d6f9543be7bf4e47f72, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T06:51:56,318 INFO [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for caa79521c82026f797a406ad32959ce2 in 47ms, sequenceid=5, compaction requested=false 2024-12-07T06:51:56,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for caa79521c82026f797a406ad32959ce2: 2024-12-07T06:51:56,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 
2024-12-07T06:51:56,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/61c02eafbb40:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-07T06:51:56,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-07T06:51:56,325 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T06:51:56,325 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-12-07T06:51:56,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 218 msec 2024-12-07T06:51:56,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36067 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T06:51:56,431 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T06:51:56,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T06:51:56,436 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T06:51:56,436 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:56,436 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:56,436 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:56,436 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T06:51:56,437 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T06:51:56,437 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=578089443, stopped=false 2024-12-07T06:51:56,437 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=61c02eafbb40,36067,1733554314609 2024-12-07T06:51:56,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:56,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:56,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:56,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:56,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:56,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, 
quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:56,439 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T06:51:56,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T06:51:56,440 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T06:51:56,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:56,440 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:56,440 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:56,440 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:56,440 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:56,440 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:56,440 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '61c02eafbb40,44403,1733554314658' ***** 2024-12-07T06:51:56,441 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T06:51:56,441 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '61c02eafbb40,40275,1733554314692' ***** 2024-12-07T06:51:56,441 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T06:51:56,441 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T06:51:56,441 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '61c02eafbb40,37067,1733554314723' ***** 2024-12-07T06:51:56,441 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T06:51:56,441 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T06:51:56,441 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T06:51:56,441 INFO [RS:1;61c02eafbb40:40275 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T06:51:56,441 INFO [RS:0;61c02eafbb40:44403 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-07T06:51:56,441 INFO [RS:0;61c02eafbb40:44403 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T06:51:56,441 INFO [RS:1;61c02eafbb40:40275 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T06:51:56,441 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T06:51:56,441 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(3091): Received CLOSE for caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:56,441 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(959): stopping server 61c02eafbb40,40275,1733554314692 2024-12-07T06:51:56,441 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T06:51:56,442 INFO [RS:1;61c02eafbb40:40275 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T06:51:56,442 INFO [RS:2;61c02eafbb40:37067 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T06:51:56,442 INFO [RS:1;61c02eafbb40:40275 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;61c02eafbb40:40275. 2024-12-07T06:51:56,442 INFO [RS:2;61c02eafbb40:37067 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T06:51:56,442 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T06:51:56,442 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(959): stopping server 61c02eafbb40,37067,1733554314723 2024-12-07T06:51:56,442 DEBUG [RS:1;61c02eafbb40:40275 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:56,442 INFO [RS:2;61c02eafbb40:37067 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T06:51:56,442 DEBUG [RS:1;61c02eafbb40:40275 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:56,442 INFO [RS:2;61c02eafbb40:37067 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;61c02eafbb40:37067. 
2024-12-07T06:51:56,442 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T06:51:56,442 DEBUG [RS:2;61c02eafbb40:37067 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:56,442 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T06:51:56,442 DEBUG [RS:2;61c02eafbb40:37067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:56,442 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T06:51:56,442 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T06:51:56,442 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(976): stopping server 61c02eafbb40,37067,1733554314723; all regions closed. 2024-12-07T06:51:56,441 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T06:51:56,442 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(959): stopping server 61c02eafbb40,44403,1733554314658 2024-12-07T06:51:56,443 INFO [RS:0;61c02eafbb40:44403 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T06:51:56,443 INFO [RS:0;61c02eafbb40:44403 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;61c02eafbb40:44403. 
2024-12-07T06:51:56,443 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing caa79521c82026f797a406ad32959ce2, disabling compactions & flushes 2024-12-07T06:51:56,443 DEBUG [RS:0;61c02eafbb40:44403 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T06:51:56,443 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,443 INFO [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 2024-12-07T06:51:56,443 DEBUG [RS:0;61c02eafbb40:44403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:56,443 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T06:51:56,443 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 2024-12-07T06:51:56,443 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,443 DEBUG [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T06:51:56,443 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T06:51:56,443 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. after waiting 0 ms 2024-12-07T06:51:56,443 DEBUG [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T06:51:56,443 DEBUG [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(1325): Online Regions={caa79521c82026f797a406ad32959ce2=TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2.} 2024-12-07T06:51:56,443 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 
2024-12-07T06:51:56,443 DEBUG [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(1351): Waiting on caa79521c82026f797a406ad32959ce2 2024-12-07T06:51:56,443 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,443 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T06:51:56,443 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,443 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T06:51:56,444 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,444 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T06:51:56,444 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T06:51:56,444 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T06:51:56,444 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-07T06:51:56,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741834_1010 (size=93) 2024-12-07T06:51:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741834_1010 (size=93) 2024-12-07T06:51:56,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741834_1010 (size=93) 2024-12-07T06:51:56,451 DEBUG [RS:2;61c02eafbb40:37067 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/oldWALs 2024-12-07T06:51:56,451 INFO [RS:2;61c02eafbb40:37067 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 61c02eafbb40%2C37067%2C1733554314723:(num 1733554315153) 2024-12-07T06:51:56,451 DEBUG [RS:2;61c02eafbb40:37067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:56,451 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:56,451 INFO [RS:2;61c02eafbb40:37067 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T06:51:56,452 INFO [RS:2;61c02eafbb40:37067 {}] hbase.ChoreService(370): Chore service for: regionserver/61c02eafbb40:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T06:51:56,452 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T06:51:56,452 INFO [regionserver/61c02eafbb40:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T06:51:56,452 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-07T06:51:56,452 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T06:51:56,452 INFO [RS:2;61c02eafbb40:37067 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T06:51:56,452 INFO [RS:2;61c02eafbb40:37067 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37067 2024-12-07T06:51:56,453 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/default/TestHBaseWalOnEC/caa79521c82026f797a406ad32959ce2/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T06:51:56,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/61c02eafbb40,37067,1733554314723 2024-12-07T06:51:56,454 INFO [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 2024-12-07T06:51:56,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T06:51:56,455 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for caa79521c82026f797a406ad32959ce2: Waiting for close lock at 1733554316443Running coprocessor pre-close hooks at 1733554316443Disabling compacts and flushes for region at 1733554316443Disabling writes for close at 1733554316443Writing region close event to WAL at 1733554316444 (+1 ms)Running coprocessor post-close hooks at 1733554316454 (+10 ms)Closed at 1733554316454 2024-12-07T06:51:56,455 INFO [RS:2;61c02eafbb40:37067 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T06:51:56,455 DEBUG [RS_CLOSE_REGION-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2. 
2024-12-07T06:51:56,455 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [61c02eafbb40,37067,1733554314723] 2024-12-07T06:51:56,458 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/61c02eafbb40,37067,1733554314723 already deleted, retry=false 2024-12-07T06:51:56,458 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 61c02eafbb40,37067,1733554314723 expired; onlineServers=2 2024-12-07T06:51:56,466 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/.tmp/info/735dd1b62db040f39d2b8dbbe083e7cd is 153, key is TestHBaseWalOnEC,,1733554315456.caa79521c82026f797a406ad32959ce2./info:regioninfo/1733554315840/Put/seqid=0 2024-12-07T06:51:56,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741840_1016 (size=6637) 2024-12-07T06:51:56,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741840_1016 (size=6637) 2024-12-07T06:51:56,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741840_1016 (size=6637) 2024-12-07T06:51:56,474 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/.tmp/info/735dd1b62db040f39d2b8dbbe083e7cd 2024-12-07T06:51:56,501 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/.tmp/ns/538b2949c9af41d4a53b65b175de0a35 is 43, key is default/ns:d/1733554315405/Put/seqid=0 2024-12-07T06:51:56,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741841_1017 (size=5153) 2024-12-07T06:51:56,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741841_1017 (size=5153) 2024-12-07T06:51:56,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741841_1017 (size=5153) 2024-12-07T06:51:56,510 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/.tmp/ns/538b2949c9af41d4a53b65b175de0a35 2024-12-07T06:51:56,514 INFO [regionserver/61c02eafbb40:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:56,514 INFO [regionserver/61c02eafbb40:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:56,529 INFO [regionserver/61c02eafbb40:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:56,535 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 
{event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/.tmp/table/c1abba7a34c2454abcbbff11f1d5ddaf is 52, key is TestHBaseWalOnEC/table:state/1733554315857/Put/seqid=0 2024-12-07T06:51:56,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741842_1018 (size=5249) 2024-12-07T06:51:56,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741842_1018 (size=5249) 2024-12-07T06:51:56,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741842_1018 (size=5249) 2024-12-07T06:51:56,544 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/.tmp/table/c1abba7a34c2454abcbbff11f1d5ddaf 2024-12-07T06:51:56,552 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/.tmp/info/735dd1b62db040f39d2b8dbbe083e7cd as hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/info/735dd1b62db040f39d2b8dbbe083e7cd 2024-12-07T06:51:56,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:56,556 INFO [RS:2;61c02eafbb40:37067 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T06:51:56,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37067-0x1018bb187ed0003, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:56,556 INFO [RS:2;61c02eafbb40:37067 {}] regionserver.HRegionServer(1031): Exiting; stopping=61c02eafbb40,37067,1733554314723; zookeeper connection closed. 
2024-12-07T06:51:56,557 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a55e9fb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a55e9fb 2024-12-07T06:51:56,561 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/info/735dd1b62db040f39d2b8dbbe083e7cd, entries=10, sequenceid=11, filesize=6.5 K 2024-12-07T06:51:56,563 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/.tmp/ns/538b2949c9af41d4a53b65b175de0a35 as hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/ns/538b2949c9af41d4a53b65b175de0a35 2024-12-07T06:51:56,570 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/ns/538b2949c9af41d4a53b65b175de0a35, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T06:51:56,572 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/.tmp/table/c1abba7a34c2454abcbbff11f1d5ddaf as hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/table/c1abba7a34c2454abcbbff11f1d5ddaf 2024-12-07T06:51:56,580 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/table/c1abba7a34c2454abcbbff11f1d5ddaf, entries=2, sequenceid=11, filesize=5.1 K 2024-12-07T06:51:56,582 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false 2024-12-07T06:51:56,588 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T06:51:56,589 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T06:51:56,589 INFO [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T06:51:56,590 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733554316443Running coprocessor pre-close hooks at 1733554316443Disabling compacts and flushes for region at 1733554316443Disabling writes for close at 1733554316444 (+1 ms)Obtaining lock to block concurrent updates at 
1733554316444Preparing flush snapshotting stores in 1588230740 at 1733554316444Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733554316444Flushing stores of hbase:meta,,1.1588230740 at 1733554316446 (+2 ms)Flushing 1588230740/info: creating writer at 1733554316446Flushing 1588230740/info: appending metadata at 1733554316465 (+19 ms)Flushing 1588230740/info: closing flushed file at 1733554316465Flushing 1588230740/ns: creating writer at 1733554316482 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733554316501 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733554316501Flushing 1588230740/table: creating writer at 1733554316518 (+17 ms)Flushing 1588230740/table: appending metadata at 1733554316534 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733554316534Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4acaa4d2: reopening flushed file at 1733554316551 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bf17cbf: reopening flushed file at 1733554316562 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e85a659: reopening flushed file at 1733554316571 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1733554316582 (+11 ms)Writing region close event to WAL at 1733554316583 (+1 ms)Running coprocessor post-close hooks at 1733554316589 (+6 ms)Closed at 1733554316589 2024-12-07T06:51:56,590 DEBUG [RS_CLOSE_META-regionserver/61c02eafbb40:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T06:51:56,643 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(976): stopping server 61c02eafbb40,40275,1733554314692; all regions closed. 2024-12-07T06:51:56,643 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(976): stopping server 61c02eafbb40,44403,1733554314658; all regions closed. 
2024-12-07T06:51:56,644 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,644 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,644 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,644 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,644 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,644 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,645 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,645 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,645 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,645 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741835_1011 (size=1298) 2024-12-07T06:51:56,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741835_1011 (size=1298) 2024-12-07T06:51:56,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741835_1011 (size=1298) 2024-12-07T06:51:56,652 DEBUG [RS:0;61c02eafbb40:44403 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/oldWALs 2024-12-07T06:51:56,652 INFO [RS:0;61c02eafbb40:44403 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 61c02eafbb40%2C44403%2C1733554314658:(num 1733554315178) 2024-12-07T06:51:56,652 DEBUG [RS:0;61c02eafbb40:44403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:56,652 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:56,652 INFO [RS:0;61c02eafbb40:44403 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T06:51:56,652 INFO [RS:0;61c02eafbb40:44403 {}] hbase.ChoreService(370): Chore service for: regionserver/61c02eafbb40:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T06:51:56,652 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T06:51:56,652 INFO [regionserver/61c02eafbb40:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T06:51:56,652 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T06:51:56,652 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T06:51:56,652 INFO [RS:0;61c02eafbb40:44403 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T06:51:56,653 INFO [RS:0;61c02eafbb40:44403 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44403 2024-12-07T06:51:56,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/61c02eafbb40,44403,1733554314658 2024-12-07T06:51:56,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T06:51:56,655 INFO [RS:0;61c02eafbb40:44403 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T06:51:56,656 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [61c02eafbb40,44403,1733554314658] 2024-12-07T06:51:56,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741836_1012 (size=2751) 2024-12-07T06:51:56,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741836_1012 (size=2751) 2024-12-07T06:51:56,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741836_1012 (size=2751) 2024-12-07T06:51:56,659 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/61c02eafbb40,44403,1733554314658 already deleted, retry=false 2024-12-07T06:51:56,659 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 61c02eafbb40,44403,1733554314658 expired; onlineServers=1 2024-12-07T06:51:56,662 DEBUG [RS:1;61c02eafbb40:40275 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/oldWALs 2024-12-07T06:51:56,662 INFO [RS:1;61c02eafbb40:40275 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 61c02eafbb40%2C40275%2C1733554314692.meta:.meta(num 1733554315351) 2024-12-07T06:51:56,663 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,663 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,663 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,663 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,663 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741833_1009 (size=93) 2024-12-07T06:51:56,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741833_1009 (size=93) 2024-12-07T06:51:56,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741833_1009 (size=93) 2024-12-07T06:51:56,669 DEBUG [RS:1;61c02eafbb40:40275 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/oldWALs 2024-12-07T06:51:56,669 INFO [RS:1;61c02eafbb40:40275 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 61c02eafbb40%2C40275%2C1733554314692:(num 1733554315153) 2024-12-07T06:51:56,669 
DEBUG [RS:1;61c02eafbb40:40275 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T06:51:56,669 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T06:51:56,669 INFO [RS:1;61c02eafbb40:40275 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T06:51:56,670 INFO [RS:1;61c02eafbb40:40275 {}] hbase.ChoreService(370): Chore service for: regionserver/61c02eafbb40:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T06:51:56,670 INFO [RS:1;61c02eafbb40:40275 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T06:51:56,670 INFO [regionserver/61c02eafbb40:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T06:51:56,670 INFO [RS:1;61c02eafbb40:40275 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40275 2024-12-07T06:51:56,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/61c02eafbb40,40275,1733554314692 2024-12-07T06:51:56,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T06:51:56,674 INFO [RS:1;61c02eafbb40:40275 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T06:51:56,676 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [61c02eafbb40,40275,1733554314692] 2024-12-07T06:51:56,677 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/61c02eafbb40,40275,1733554314692 already deleted, retry=false 2024-12-07T06:51:56,677 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 61c02eafbb40,40275,1733554314692 expired; onlineServers=0 2024-12-07T06:51:56,677 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '61c02eafbb40,36067,1733554314609' ***** 2024-12-07T06:51:56,677 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T06:51:56,678 INFO [M:0;61c02eafbb40:36067 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T06:51:56,678 INFO [M:0;61c02eafbb40:36067 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T06:51:56,678 DEBUG [M:0;61c02eafbb40:36067 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T06:51:56,678 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T06:51:56,678 DEBUG [M:0;61c02eafbb40:36067 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T06:51:56,678 DEBUG [master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.large.0-1733554314932 {}] cleaner.HFileCleaner(306): Exit Thread[master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.large.0-1733554314932,5,FailOnTimeoutGroup] 2024-12-07T06:51:56,678 DEBUG [master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.small.0-1733554314932 {}] cleaner.HFileCleaner(306): Exit Thread[master/61c02eafbb40:0:becomeActiveMaster-HFileCleaner.small.0-1733554314932,5,FailOnTimeoutGroup] 2024-12-07T06:51:56,678 INFO [M:0;61c02eafbb40:36067 {}] hbase.ChoreService(370): Chore service for: master/61c02eafbb40:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T06:51:56,678 INFO [M:0;61c02eafbb40:36067 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T06:51:56,678 DEBUG [M:0;61c02eafbb40:36067 {}] master.HMaster(1795): Stopping service threads 2024-12-07T06:51:56,679 INFO [M:0;61c02eafbb40:36067 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T06:51:56,679 INFO [M:0;61c02eafbb40:36067 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T06:51:56,679 INFO [M:0;61c02eafbb40:36067 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T06:51:56,679 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T06:51:56,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T06:51:56,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T06:51:56,680 DEBUG [M:0;61c02eafbb40:36067 {}] zookeeper.ZKUtil(347): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T06:51:56,680 WARN [M:0;61c02eafbb40:36067 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T06:51:56,680 INFO [M:0;61c02eafbb40:36067 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/.lastflushedseqids 2024-12-07T06:51:56,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741843_1019 (size=127) 2024-12-07T06:51:56,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741843_1019 (size=127) 2024-12-07T06:51:56,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741843_1019 (size=127) 2024-12-07T06:51:56,691 INFO [M:0;61c02eafbb40:36067 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T06:51:56,691 INFO [M:0;61c02eafbb40:36067 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T06:51:56,691 DEBUG [M:0;61c02eafbb40:36067 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T06:51:56,691 INFO [M:0;61c02eafbb40:36067 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:56,691 DEBUG [M:0;61c02eafbb40:36067 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:56,691 DEBUG [M:0;61c02eafbb40:36067 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T06:51:56,691 DEBUG [M:0;61c02eafbb40:36067 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:56,691 INFO [M:0;61c02eafbb40:36067 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.81 KB heapSize=34.10 KB 2024-12-07T06:51:56,718 DEBUG [M:0;61c02eafbb40:36067 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0365c310bbc74052879ba8eb0ffd2bdc is 82, key is hbase:meta,,1/info:regioninfo/1733554315388/Put/seqid=0 2024-12-07T06:51:56,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741844_1020 (size=5672) 2024-12-07T06:51:56,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741844_1020 (size=5672) 2024-12-07T06:51:56,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741844_1020 (size=5672) 2024-12-07T06:51:56,728 INFO [M:0;61c02eafbb40:36067 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0365c310bbc74052879ba8eb0ffd2bdc 2024-12-07T06:51:56,753 DEBUG [M:0;61c02eafbb40:36067 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/39cb9b74c6164a298b61e60328cc7cd2 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733554315864/Put/seqid=0 2024-12-07T06:51:56,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:56,758 INFO [RS:0;61c02eafbb40:44403 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T06:51:56,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44403-0x1018bb187ed0001, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:56,758 INFO [RS:0;61c02eafbb40:44403 {}] regionserver.HRegionServer(1031): Exiting; stopping=61c02eafbb40,44403,1733554314658; zookeeper connection closed. 
2024-12-07T06:51:56,759 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@59773560 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@59773560 2024-12-07T06:51:56,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741845_1021 (size=6437) 2024-12-07T06:51:56,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741845_1021 (size=6437) 2024-12-07T06:51:56,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741845_1021 (size=6437) 2024-12-07T06:51:56,762 INFO [M:0;61c02eafbb40:36067 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.12 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/39cb9b74c6164a298b61e60328cc7cd2 2024-12-07T06:51:56,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:56,776 INFO [RS:1;61c02eafbb40:40275 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T06:51:56,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40275-0x1018bb187ed0002, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:56,776 INFO [RS:1;61c02eafbb40:40275 {}] regionserver.HRegionServer(1031): Exiting; stopping=61c02eafbb40,40275,1733554314692; zookeeper connection closed. 
2024-12-07T06:51:56,776 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@22d5921a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@22d5921a 2024-12-07T06:51:56,776 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T06:51:56,785 DEBUG [M:0;61c02eafbb40:36067 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cd23d7467c0a48379100f0c1b92ed23c is 69, key is 61c02eafbb40,37067,1733554314723/rs:state/1733554314982/Put/seqid=0 2024-12-07T06:51:56,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741846_1022 (size=5294) 2024-12-07T06:51:56,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741846_1022 (size=5294) 2024-12-07T06:51:56,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741846_1022 (size=5294) 2024-12-07T06:51:56,793 INFO [M:0;61c02eafbb40:36067 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cd23d7467c0a48379100f0c1b92ed23c 2024-12-07T06:51:56,800 DEBUG [M:0;61c02eafbb40:36067 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0365c310bbc74052879ba8eb0ffd2bdc as hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0365c310bbc74052879ba8eb0ffd2bdc 2024-12-07T06:51:56,806 INFO [M:0;61c02eafbb40:36067 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0365c310bbc74052879ba8eb0ffd2bdc, entries=8, sequenceid=72, filesize=5.5 K 2024-12-07T06:51:56,807 DEBUG [M:0;61c02eafbb40:36067 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/39cb9b74c6164a298b61e60328cc7cd2 as hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/39cb9b74c6164a298b61e60328cc7cd2 2024-12-07T06:51:56,814 INFO [M:0;61c02eafbb40:36067 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/39cb9b74c6164a298b61e60328cc7cd2, entries=8, sequenceid=72, filesize=6.3 K 2024-12-07T06:51:56,815 DEBUG [M:0;61c02eafbb40:36067 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cd23d7467c0a48379100f0c1b92ed23c as 
hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cd23d7467c0a48379100f0c1b92ed23c 2024-12-07T06:51:56,821 INFO [M:0;61c02eafbb40:36067 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37531/user/jenkins/test-data/63bfa217-f03a-b7b1-736f-1f7fcd39c315/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cd23d7467c0a48379100f0c1b92ed23c, entries=3, sequenceid=72, filesize=5.2 K 2024-12-07T06:51:56,823 INFO [M:0;61c02eafbb40:36067 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false 2024-12-07T06:51:56,825 INFO [M:0;61c02eafbb40:36067 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T06:51:56,825 DEBUG [M:0;61c02eafbb40:36067 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733554316691Disabling compacts and flushes for region at 1733554316691Disabling writes for close at 1733554316691Obtaining lock to block concurrent updates at 1733554316691Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733554316691Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27450, getHeapSize=34856, getOffHeapSize=0, getCellsCount=85 at 1733554316692 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733554316693 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733554316693Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733554316717 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733554316717Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733554316735 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733554316752 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733554316752Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733554316768 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733554316784 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733554316784Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3173602a: reopening flushed file at 1733554316799 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33d78311: reopening flushed file at 1733554316806 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e9aab8a: reopening flushed file at 1733554316814 (+8 ms)Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false at 1733554316823 (+9 ms)Writing region close event to WAL at 1733554316824 (+1 ms)Closed at 1733554316824 2024-12-07T06:51:56,825 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,825 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,825 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,825 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,826 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T06:51:56,828 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741830_1006 (size=32653) 2024-12-07T06:51:56,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741830_1006 (size=32653) 2024-12-07T06:51:56,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741830_1006 (size=32653) 2024-12-07T06:51:56,830 INFO [M:0;61c02eafbb40:36067 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T06:51:56,830 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T06:51:56,830 INFO [M:0;61c02eafbb40:36067 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36067 2024-12-07T06:51:56,831 INFO [M:0;61c02eafbb40:36067 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T06:51:56,934 INFO [M:0;61c02eafbb40:36067 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T06:51:56,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:56,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36067-0x1018bb187ed0000, quorum=127.0.0.1:60801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T06:51:56,936 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@395e66b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:56,937 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@765c7210{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T06:51:56,937 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T06:51:56,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@611fd8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T06:51:56,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3beb2b8e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.log.dir/,STOPPED} 2024-12-07T06:51:56,938 WARN [BP-1161046142-172.17.0.2-1733554313592 heartbeating to localhost/127.0.0.1:37531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T06:51:56,939 WARN [BP-1161046142-172.17.0.2-1733554313592 heartbeating to localhost/127.0.0.1:37531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1161046142-172.17.0.2-1733554313592 (Datanode Uuid 2754d510-d5cb-4ef8-8831-1bb4a8a6e0c9) service to localhost/127.0.0.1:37531 2024-12-07T06:51:56,939 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T06:51:56,939 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T06:51:56,939 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data5/current/BP-1161046142-172.17.0.2-1733554313592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T06:51:56,940 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data6/current/BP-1161046142-172.17.0.2-1733554313592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T06:51:56,940 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T06:51:56,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f2c6971{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T06:51:56,942 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78adaad1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T06:51:56,942 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T06:51:56,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6de03e39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T06:51:56,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a71642{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.log.dir/,STOPPED} 2024-12-07T06:51:56,944 WARN [BP-1161046142-172.17.0.2-1733554313592 heartbeating to localhost/127.0.0.1:37531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T06:51:56,944 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T06:51:56,944 WARN [BP-1161046142-172.17.0.2-1733554313592 heartbeating to localhost/127.0.0.1:37531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1161046142-172.17.0.2-1733554313592 (Datanode Uuid 19434a39-e1a3-454c-bfe2-49a0599ef657) service to localhost/127.0.0.1:37531
2024-12-07T06:51:56,944 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T06:51:56,945 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data3/current/BP-1161046142-172.17.0.2-1733554313592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T06:51:56,945 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data4/current/BP-1161046142-172.17.0.2-1733554313592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T06:51:56,945 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T06:51:56,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4072566{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T06:51:56,948 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6002231{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T06:51:56,948 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T06:51:56,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ef18cbe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T06:51:56,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c867c3b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.log.dir/,STOPPED}
2024-12-07T06:51:56,950 WARN [BP-1161046142-172.17.0.2-1733554313592 heartbeating to localhost/127.0.0.1:37531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T06:51:56,950 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T06:51:56,950 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T06:51:56,950 WARN [BP-1161046142-172.17.0.2-1733554313592 heartbeating to localhost/127.0.0.1:37531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1161046142-172.17.0.2-1733554313592 (Datanode Uuid 8a5a4007-6645-43f4-9aa3-3aa0996c0309) service to localhost/127.0.0.1:37531
2024-12-07T06:51:56,950 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data1/current/BP-1161046142-172.17.0.2-1733554313592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T06:51:56,950 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/cluster_b9c399f1-980b-07b3-5296-2a8bf4cd0e9c/data/data2/current/BP-1161046142-172.17.0.2-1733554313592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T06:51:56,951 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T06:51:56,956 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d637fa1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T06:51:56,957 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6684a7bb{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T06:51:56,957 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T06:51:56,957 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@117b7671{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T06:51:56,957 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1921d73d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/37357f55-a14c-0088-e8da-776df44b915b/hadoop.log.dir/,STOPPED}
2024-12-07T06:51:56,965 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-07T06:51:56,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-07T06:51:56,996 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=149 (was 87) - Thread LEAK? -, OpenFileDescriptor=521 (was 453) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=201 (was 192) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6383 (was 6589)
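The teardown logged above is the minicluster shutdown at the end of regionserver.wal.TestHBaseWalOnEC#testReadWrite: HBaseTestingUtil stops the master and region servers, closes ZooKeeper, shuts down the HDFS DataNodes and NameNode web contexts, and ResourceChecker then reports thread and file-descriptor counts. For orientation, here is a minimal sketch of that start/stop lifecycle in a JUnit test, assuming the HBaseTestingUtil startMiniCluster/shutdownMiniCluster/createTable API; the class name, table name, and test body below are illustrative assumptions, not the actual TestHBaseWalOnEC source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    // Hypothetical example class; real HBase tests also declare a
    // HBaseClassTestRule and test-category annotations, omitted here.
    public class MiniClusterLifecycleSketch {

      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // Starts MiniZK, a mini HDFS cluster (the DataNodes seen shutting
        // down in the log above) and a mini HBase cluster with 3 slaves.
        UTIL.startMiniCluster(3);
      }

      @AfterClass
      public static void tearDown() throws Exception {
        // Produces the shutdown sequence above, ending with
        // "Minicluster is down" from HBaseTestingUtil.
        UTIL.shutdownMiniCluster();
      }

      @Test
      public void testReadWrite() throws Exception {
        // Trivial write against the minicluster; table/family names are
        // placeholders for this sketch.
        try (Table table = UTIL.createTable(TableName.valueOf("sketch"), Bytes.toBytes("f"))) {
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        }
      }
    }

The ResourceChecker line that closes the log compares thread, file-descriptor, load, and memory counts before and after the test method, which is how a "Thread LEAK?" warning like Thread=149 (was 87) gets flagged for flaky-test triage.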