2024-12-09 03:50:55,274 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-09 03:50:55,285 main DEBUG Took 0.009919 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 03:50:55,286 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 03:50:55,286 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 03:50:55,287 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 03:50:55,288 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,297 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 03:50:55,316 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,318 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,319 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,319 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,320 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,321 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,321 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,322 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,322 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,322 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 03:50:55,323 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,323 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,324 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,324 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,325 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,325 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,325 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,326 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,326 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:50:55,326 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,327 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 03:50:55,328 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:50:55,329 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 03:50:55,331 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 03:50:55,331 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 03:50:55,333 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 03:50:55,333 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 03:50:55,341 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 03:50:55,344 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 03:50:55,345 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 03:50:55,346 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 03:50:55,346 main DEBUG createAppenders(={Console}) 2024-12-09 03:50:55,347 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-09 03:50:55,347 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-09 03:50:55,347 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-09 03:50:55,348 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 03:50:55,348 main DEBUG OutputStream closed 2024-12-09 03:50:55,348 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 03:50:55,349 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 03:50:55,349 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-09 03:50:55,415 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 03:50:55,418 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 03:50:55,419 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 03:50:55,420 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 03:50:55,421 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 03:50:55,422 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 03:50:55,422 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 03:50:55,423 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 03:50:55,423 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 03:50:55,423 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 03:50:55,424 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 03:50:55,425 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 03:50:55,425 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 03:50:55,426 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 03:50:55,426 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 03:50:55,427 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 03:50:55,427 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 03:50:55,428 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 03:50:55,430 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 03:50:55,430 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-09 03:50:55,431 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 03:50:55,432 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-09T03:50:55,450 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-09 03:50:55,453 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 03:50:55,454 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-09T03:50:55,701 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662 2024-12-09T03:50:55,725 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902, deleteOnExit=true 2024-12-09T03:50:55,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/test.cache.data in system properties and HBase conf 2024-12-09T03:50:55,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T03:50:55,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir in system properties and HBase conf 2024-12-09T03:50:55,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T03:50:55,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T03:50:55,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T03:50:55,816 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T03:50:55,908 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T03:50:55,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:50:55,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:50:55,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T03:50:55,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:50:55,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T03:50:55,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T03:50:55,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:50:55,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:50:55,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T03:50:55,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/nfs.dump.dir in system properties and HBase conf 2024-12-09T03:50:55,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/java.io.tmpdir in system properties and HBase conf 2024-12-09T03:50:55,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:50:55,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T03:50:55,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T03:50:56,745 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T03:50:56,821 INFO [Time-limited test {}] log.Log(170): Logging initialized @2283ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T03:50:56,897 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:50:56,959 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:50:56,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:50:56,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:50:56,982 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:50:56,995 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:50:56,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:50:56,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:50:57,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/java.io.tmpdir/jetty-localhost-41695-hadoop-hdfs-3_4_1-tests_jar-_-any-4114666790978926611/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:50:57,207 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:41695} 2024-12-09T03:50:57,208 INFO [Time-limited test {}] server.Server(415): Started @2670ms 2024-12-09T03:50:57,591 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:50:57,597 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:50:57,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:50:57,599 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:50:57,599 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:50:57,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:50:57,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:50:57,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/java.io.tmpdir/jetty-localhost-36851-hadoop-hdfs-3_4_1-tests_jar-_-any-12767763526661851245/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:50:57,722 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:36851} 2024-12-09T03:50:57,722 INFO [Time-limited test {}] server.Server(415): Started @3185ms 2024-12-09T03:50:57,778 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:50:57,894 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:50:57,898 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:50:57,900 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:50:57,901 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:50:57,901 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:50:57,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:50:57,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:50:58,034 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/java.io.tmpdir/jetty-localhost-35839-hadoop-hdfs-3_4_1-tests_jar-_-any-13856536871840021376/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:50:58,034 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:35839} 2024-12-09T03:50:58,035 INFO [Time-limited test {}] server.Server(415): Started @3497ms 2024-12-09T03:50:58,037 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:50:58,077 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:50:58,082 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:50:58,084 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:50:58,084 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:50:58,084 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:50:58,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:50:58,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:50:58,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/java.io.tmpdir/jetty-localhost-35305-hadoop-hdfs-3_4_1-tests_jar-_-any-16973975822969616543/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:50:58,219 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:35305} 2024-12-09T03:50:58,220 INFO [Time-limited test {}] server.Server(415): Started @3682ms 2024-12-09T03:50:58,222 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T03:50:58,237 WARN [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data2/current/BP-216086345-172.17.0.2-1733716256505/current, will proceed with Du for space computation calculation, 2024-12-09T03:50:58,237 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data1/current/BP-216086345-172.17.0.2-1733716256505/current, will proceed with Du for space computation calculation, 2024-12-09T03:50:58,237 WARN [Thread-111 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data4/current/BP-216086345-172.17.0.2-1733716256505/current, will proceed with Du for space computation calculation, 2024-12-09T03:50:58,237 WARN [Thread-110 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data3/current/BP-216086345-172.17.0.2-1733716256505/current, will proceed with Du for space computation calculation, 2024-12-09T03:50:58,281 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:50:58,282 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:50:58,355 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3939e3927af72122 with lease ID 0x3fd529f1bee4f8e9: Processing first storage report for DS-d05c67c4-8e69-4655-8cf5-cac201faae14 from datanode DatanodeRegistration(127.0.0.1:34573, datanodeUuid=01635950-7823-4032-b34f-cf031dfd9b5d, infoPort=38845, infoSecurePort=0, ipcPort=37153, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505) 2024-12-09T03:50:58,356 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3939e3927af72122 with lease ID 0x3fd529f1bee4f8e9: from storage DS-d05c67c4-8e69-4655-8cf5-cac201faae14 node DatanodeRegistration(127.0.0.1:34573, datanodeUuid=01635950-7823-4032-b34f-cf031dfd9b5d, infoPort=38845, infoSecurePort=0, ipcPort=37153, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T03:50:58,356 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5b1c824602d0765 with lease ID 0x3fd529f1bee4f8e8: Processing first storage report for DS-08b1bb43-3dfa-4495-8166-03c74cf62826 from datanode DatanodeRegistration(127.0.0.1:42491, datanodeUuid=4dd6e86f-32df-42db-aee2-16fb3ec94110, infoPort=43637, infoSecurePort=0, ipcPort=37665, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505) 2024-12-09T03:50:58,356 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5b1c824602d0765 with lease ID 0x3fd529f1bee4f8e8: from storage DS-08b1bb43-3dfa-4495-8166-03c74cf62826 node DatanodeRegistration(127.0.0.1:42491, datanodeUuid=4dd6e86f-32df-42db-aee2-16fb3ec94110, infoPort=43637, infoSecurePort=0, ipcPort=37665, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:50:58,357 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3939e3927af72122 with lease ID 0x3fd529f1bee4f8e9: Processing first storage report for DS-fc66b130-dcd9-4385-a10e-818d11de4b6a from datanode DatanodeRegistration(127.0.0.1:34573, datanodeUuid=01635950-7823-4032-b34f-cf031dfd9b5d, infoPort=38845, infoSecurePort=0, ipcPort=37153, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505) 2024-12-09T03:50:58,357 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3939e3927af72122 with lease ID 0x3fd529f1bee4f8e9: from storage DS-fc66b130-dcd9-4385-a10e-818d11de4b6a node DatanodeRegistration(127.0.0.1:34573, datanodeUuid=01635950-7823-4032-b34f-cf031dfd9b5d, infoPort=38845, infoSecurePort=0, ipcPort=37153, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T03:50:58,357 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5b1c824602d0765 with lease ID 0x3fd529f1bee4f8e8: Processing first storage report for DS-7f303f0f-5d97-419d-97fc-e1e81edebcfa from datanode DatanodeRegistration(127.0.0.1:42491, datanodeUuid=4dd6e86f-32df-42db-aee2-16fb3ec94110, infoPort=43637, infoSecurePort=0, ipcPort=37665, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505) 2024-12-09T03:50:58,357 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xf5b1c824602d0765 with lease ID 0x3fd529f1bee4f8e8: from storage DS-7f303f0f-5d97-419d-97fc-e1e81edebcfa node DatanodeRegistration(127.0.0.1:42491, datanodeUuid=4dd6e86f-32df-42db-aee2-16fb3ec94110, infoPort=43637, infoSecurePort=0, ipcPort=37665, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:50:58,360 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data5/current/BP-216086345-172.17.0.2-1733716256505/current, will proceed with Du for space computation calculation, 2024-12-09T03:50:58,360 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data6/current/BP-216086345-172.17.0.2-1733716256505/current, will proceed with Du for space computation calculation, 2024-12-09T03:50:58,389 WARN [Thread-119 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:50:58,395 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x89c5297eca9da5f3 with lease ID 0x3fd529f1bee4f8ea: Processing first storage report for DS-6209cff2-830a-4fbb-ae94-aac7695fb660 from datanode DatanodeRegistration(127.0.0.1:39673, datanodeUuid=fea5b7a2-fe98-4716-b8bc-c906225e966c, infoPort=42077, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505) 2024-12-09T03:50:58,395 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x89c5297eca9da5f3 with lease ID 0x3fd529f1bee4f8ea: from storage DS-6209cff2-830a-4fbb-ae94-aac7695fb660 node DatanodeRegistration(127.0.0.1:39673, datanodeUuid=fea5b7a2-fe98-4716-b8bc-c906225e966c, infoPort=42077, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:50:58,395 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x89c5297eca9da5f3 with lease ID 0x3fd529f1bee4f8ea: Processing first storage report for DS-a074469a-540a-42ea-975f-1af57cc90431 from datanode DatanodeRegistration(127.0.0.1:39673, datanodeUuid=fea5b7a2-fe98-4716-b8bc-c906225e966c, infoPort=42077, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505) 2024-12-09T03:50:58,395 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x89c5297eca9da5f3 with lease ID 0x3fd529f1bee4f8ea: from storage DS-a074469a-540a-42ea-975f-1af57cc90431 node DatanodeRegistration(127.0.0.1:39673, datanodeUuid=fea5b7a2-fe98-4716-b8bc-c906225e966c, infoPort=42077, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=508719271;c=1733716256505), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:50:58,612 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662
2024-12-09T03:50:58,687 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-12-09T03:50:58,742 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=158, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=122, ProcessCount=11, AvailableMemoryMB=12306
2024-12-09T03:50:58,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T03:50:58,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-09T03:50:58,851 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/zookeeper_0, clientPort=53617, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T03:50:58,862 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53617
2024-12-09T03:50:58,877 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T03:50:58,880 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T03:50:58,964 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T03:50:58,964 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
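The StartMiniClusterOption entry above records the topology this test runs against: one master, three region servers, three data nodes, one ZooKeeper server. The "Cannot allocate parity block" warnings are consistent with that: the RS-3-2-1024k policy wants five distinct datanodes and only three are up. A minimal sketch of how a test could request this topology is shown below; it assumes the current HBaseTestingUtil/StartMiniClusterOption builder APIs and is not the actual TestHBaseWalOnEC source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Sketch only: mirrors StartMiniClusterOption{numMasters=1, numRegionServers=3,
        // numDataNodes=3, numZkServers=1} reported in the log above.
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // brings up mini ZK, DFS (if not already running) and HBase
        try {
          // WAL-on-EC assertions would run here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }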
2024-12-09T03:50:59,021 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:60172 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:34573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60172 dst: /127.0.0.1:34573
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T03:50:59,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-09T03:50:59,443 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T03:50:59,453 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68 with version=8 2024-12-09T03:50:59,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/hbase-staging 2024-12-09T03:50:59,545 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T03:50:59,789 INFO [Time-limited test {}] client.ConnectionUtils(128): master/55d0183f16d2:0 server-side Connection retries=45 2024-12-09T03:50:59,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:50:59,801 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:50:59,805 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:50:59,805 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:50:59,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:50:59,937 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T03:51:00,001 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T03:51:00,010 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T03:51:00,014 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:51:00,043 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 8299 (auto-detected) 2024-12-09T03:51:00,045 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T03:51:00,066 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42121 2024-12-09T03:51:00,087 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42121 connecting to ZooKeeper ensemble=127.0.0.1:53617 2024-12-09T03:51:00,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:421210x0, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:51:00,121 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42121-0x10195589a9a0000 connected 2024-12-09T03:51:00,154 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:00,158 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:00,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:00,177 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68, hbase.cluster.distributed=false 2024-12-09T03:51:00,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:51:00,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42121 2024-12-09T03:51:00,215 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42121 2024-12-09T03:51:00,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42121 2024-12-09T03:51:00,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42121 2024-12-09T03:51:00,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42121 2024-12-09T03:51:00,337 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/55d0183f16d2:0 server-side Connection retries=45 2024-12-09T03:51:00,340 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:00,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:00,341 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:51:00,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:00,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:51:00,344 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:51:00,349 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:51:00,353 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36451 2024-12-09T03:51:00,356 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36451 connecting to ZooKeeper ensemble=127.0.0.1:53617 2024-12-09T03:51:00,357 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:00,364 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:00,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364510x0, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:51:00,374 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:364510x0, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:00,376 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36451-0x10195589a9a0001 connected 2024-12-09T03:51:00,380 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:51:00,390 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:51:00,392 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:51:00,397 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:51:00,398 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36451 2024-12-09T03:51:00,398 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36451 2024-12-09T03:51:00,399 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36451 2024-12-09T03:51:00,401 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36451 2024-12-09T03:51:00,401 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36451 2024-12-09T03:51:00,418 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/55d0183f16d2:0 server-side Connection retries=45 2024-12-09T03:51:00,419 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:00,419 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:00,420 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:51:00,420 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:00,420 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:51:00,420 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:51:00,420 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:51:00,421 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43719 2024-12-09T03:51:00,423 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43719 connecting to ZooKeeper ensemble=127.0.0.1:53617 2024-12-09T03:51:00,424 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:00,429 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:00,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:437190x0, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:51:00,438 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43719-0x10195589a9a0002 connected 2024-12-09T03:51:00,438 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:00,440 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:51:00,447 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:51:00,449 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:51:00,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:51:00,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43719 2024-12-09T03:51:00,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43719 2024-12-09T03:51:00,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43719 2024-12-09T03:51:00,456 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43719 2024-12-09T03:51:00,457 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43719 2024-12-09T03:51:00,475 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/55d0183f16d2:0 server-side Connection retries=45 2024-12-09T03:51:00,475 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:00,475 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:00,476 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:51:00,476 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:00,476 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:51:00,476 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:51:00,476 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:51:00,477 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42313 2024-12-09T03:51:00,479 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42313 connecting to ZooKeeper ensemble=127.0.0.1:53617 2024-12-09T03:51:00,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:00,482 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:00,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423130x0, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:51:00,489 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:423130x0, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:00,489 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42313-0x10195589a9a0003 connected 2024-12-09T03:51:00,490 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:51:00,493 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:51:00,494 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:51:00,496 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:51:00,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42313 2024-12-09T03:51:00,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42313 2024-12-09T03:51:00,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42313 2024-12-09T03:51:00,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42313 2024-12-09T03:51:00,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42313 2024-12-09T03:51:00,521 DEBUG [M:0;55d0183f16d2:42121 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;55d0183f16d2:42121 2024-12-09T03:51:00,522 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/55d0183f16d2,42121,1733716259597 2024-12-09T03:51:00,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:00,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:00,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:00,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:00,534 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/55d0183f16d2,42121,1733716259597 2024-12-09T03:51:00,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:51:00,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:51:00,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:00,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:00,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:00,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:51:00,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:00,562 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:51:00,564 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/55d0183f16d2,42121,1733716259597 from backup master directory 2024-12-09T03:51:00,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/55d0183f16d2,42121,1733716259597 2024-12-09T03:51:00,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:00,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:00,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:00,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:00,571 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
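The watcher traffic above is plain ZooKeeper: the master registers itself under /hbase/backup-masters, and every peer that has set a watch on that znode (or on /hbase/master) receives the NodeChildrenChanged / NodeCreated / NodeDeleted events recorded here. A minimal standalone sketch of the same pattern, written against the stock org.apache.zookeeper client and assuming only the ensemble address 127.0.0.1:53617 taken from the log (the class name MasterZNodeWatch is made up and is not part of the test):

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Session watcher: released once the client reaches SyncConnected, as in the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53617", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // exists() registers a one-shot watch even when the znode is absent, which is what the
    // "Set watcher on znode that does not yet exist" lines correspond to.
    zk.exists("/hbase/master", event ->
        System.out.println("Event on /hbase/master: " + event.getType()));
    // getChildren() with watch=true would fire on the NodeChildrenChanged events seen above.
    List<String> backups = zk.getChildren("/hbase/backup-masters", true);
    System.out.println("backup masters: " + backups);
    zk.close();
  }
}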
2024-12-09T03:51:00,571 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=55d0183f16d2,42121,1733716259597 2024-12-09T03:51:00,574 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T03:51:00,576 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T03:51:00,649 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/hbase.id] with ID: e9914944-b583-4516-833d-8439e5208c15 2024-12-09T03:51:00,650 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/.tmp/hbase.id 2024-12-09T03:51:00,657 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:00,657 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:00,666 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:35686 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:34573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35686 dst: /127.0.0.1:34573 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
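The RS-3-2-1024k warnings above indicate an erasure-coding policy that needs more datanodes (3 data + 2 parity) than this mini-DFS provides, and the aborted DataXceiver write that follows is consistent with that. Besides the 'hdfs ec -verifyClusterSetup' command the warning itself suggests, a similar check can be approximated from Java. This is a sketch only: the NameNode URI hdfs://localhost:46337 and the test-data directory are taken from the log, and the class name is invented.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CheckEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:46337"); // NameNode from the log
    Path dir = new Path("/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68");
    try (DistributedFileSystem dfs = (DistributedFileSystem) dir.getFileSystem(conf)) {
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir); // null if no policy is set
      int liveDataNodes = dfs.getDataNodeStats().length;
      if (policy != null) {
        int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
        System.out.println(policy.getName() + " needs " + needed
            + " datanodes, cluster reports " + liveDataNodes);
      }
    }
  }
}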
2024-12-09T03:51:00,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-09T03:51:00,675 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T03:51:00,675 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/.tmp/hbase.id]:[hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/hbase.id] 2024-12-09T03:51:00,722 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:00,726 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T03:51:00,745 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-12-09T03:51:00,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:00,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:00,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:00,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:00,761 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:00,761 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:00,768 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:58150 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:39673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58150 dst: /127.0.0.1:39673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:00,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-09T03:51:00,774 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T03:51:00,790 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:51:00,792 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T03:51:00,799 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T03:51:00,829 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:00,830 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:00,834 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:58158 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:39673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58158 dst: /127.0.0.1:39673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:00,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-09T03:51:00,844 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
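The master:store descriptor printed a few lines earlier is an ordinary table descriptor with four column families. For reference, roughly the same attributes as its 'info' family (VERSIONS=3, IN_MEMORY, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom filter) could be expressed with the public client API as below; this is illustrative only and is not how MasterRegion builds its descriptor internally.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .build())
        .build();
  }
}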
2024-12-09T03:51:00,863 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store 2024-12-09T03:51:00,885 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:00,885 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:00,888 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:58176 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58176 dst: /127.0.0.1:39673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:00,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-09T03:51:00,894 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T03:51:00,898 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T03:51:00,901 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:00,902 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:51:00,902 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:00,903 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:00,905 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:51:00,905 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:00,905 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
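StoreHotnessProtector reports itself disabled because hbase.region.store.parallel.put.limit is 0 in this run, which is exactly the knob its message points at. If the limit were wanted it would normally be set in hbase-site.xml; programmatically the equivalent is a one-liner. The value 10 below is only an example, not something taken from or recommended by this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableStoreHotnessProtector {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // 0 (as seen in this run) disables the protector; any positive value enables it.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);
    System.out.println(conf.get("hbase.region.store.parallel.put.limit"));
  }
}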
2024-12-09T03:51:00,906 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733716260902Disabling compacts and flushes for region at 1733716260902Disabling writes for close at 1733716260905 (+3 ms)Writing region close event to WAL at 1733716260905Closed at 1733716260905 2024-12-09T03:51:00,908 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/.initializing 2024-12-09T03:51:00,908 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/WALs/55d0183f16d2,42121,1733716259597 2024-12-09T03:51:00,917 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T03:51:00,934 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C42121%2C1733716259597, suffix=, logDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/WALs/55d0183f16d2,42121,1733716259597, archiveDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/oldWALs, maxLogs=10 2024-12-09T03:51:00,968 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/WALs/55d0183f16d2,42121,1733716259597/55d0183f16d2%2C42121%2C1733716259597.1733716260940, exclude list is [], retry=0 2024-12-09T03:51:00,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:00,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34573,DS-d05c67c4-8e69-4655-8cf5-cac201faae14,DISK] 2024-12-09T03:51:00,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39673,DS-6209cff2-830a-4fbb-ae94-aac7695fb660,DISK] 2024-12-09T03:51:00,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42491,DS-08b1bb43-3dfa-4495-8166-03c74cf62826,DISK] 2024-12-09T03:51:00,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-09T03:51:01,045 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/WALs/55d0183f16d2,42121,1733716259597/55d0183f16d2%2C42121%2C1733716259597.1733716260940 2024-12-09T03:51:01,046 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42077:42077),(127.0.0.1/127.0.0.1:43637:43637),(127.0.0.1/127.0.0.1:38845:38845)] 2024-12-09T03:51:01,047 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:51:01,047 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:01,051 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,052 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,110 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T03:51:01,154 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:01,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:01,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,162 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T03:51:01,162 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:01,164 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:51:01,164 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,167 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T03:51:01,167 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:01,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:51:01,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T03:51:01,172 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:01,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:51:01,173 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,177 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,179 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,186 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,187 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,191 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:51:01,195 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:01,215 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:51:01,218 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68238733, jitterRate=0.016836360096931458}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:51:01,225 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733716261068Initializing all the Stores at 1733716261074 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716261075 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716261076 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716261076Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716261076Cleaning up temporary data from old regions at 1733716261187 (+111 ms)Region opened successfully at 1733716261225 (+38 ms) 2024-12-09T03:51:01,226 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T03:51:01,264 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2633c7c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=55d0183f16d2/172.17.0.2:0 2024-12-09T03:51:01,297 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T03:51:01,309 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T03:51:01,310 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T03:51:01,313 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T03:51:01,314 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T03:51:01,320 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-09T03:51:01,321 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T03:51:01,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-09T03:51:01,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-09T03:51:01,349 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
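The WAL lines logged at 03:51:00,934 (AsyncFSWALProvider; blocksize=256 MB, rollsize=128 MB, maxLogs=10) map onto a handful of standard configuration keys. The sketch below shows those keys with example values; it is an assumption about which settings produce those numbers, not a statement of what this test actually configures.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");                     // AsyncFSWALProvider, as logged
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L << 20); // WAL block size (example: 256 MB)
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // roll size = blocksize * multiplier
    conf.setInt("hbase.regionserver.maxlogs", 10);                 // matches maxLogs=10 above
    long rollAt = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("would roll the WAL at ~" + rollAt + " bytes");
  }
}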
2024-12-09T03:51:01,360 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T03:51:01,362 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T03:51:01,364 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T03:51:01,366 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T03:51:01,368 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T03:51:01,370 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T03:51:01,374 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T03:51:01,376 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T03:51:01,377 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T03:51:01,378 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T03:51:01,397 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T03:51:01,399 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T03:51:01,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:01,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:01,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:01,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T03:51:01,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:01,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,409 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=55d0183f16d2,42121,1733716259597, sessionid=0x10195589a9a0000, setting cluster-up flag (Was=false) 2024-12-09T03:51:01,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,430 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T03:51:01,432 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=55d0183f16d2,42121,1733716259597 2024-12-09T03:51:01,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:01,445 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T03:51:01,446 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=55d0183f16d2,42121,1733716259597 2024-12-09T03:51:01,453 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T03:51:01,511 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(746): ClusterId : e9914944-b583-4516-833d-8439e5208c15 2024-12-09T03:51:01,511 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(746): ClusterId : e9914944-b583-4516-833d-8439e5208c15 2024-12-09T03:51:01,511 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(746): ClusterId : e9914944-b583-4516-833d-8439e5208c15 2024-12-09T03:51:01,515 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:51:01,515 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:51:01,515 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:51:01,522 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:51:01,522 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:51:01,522 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:51:01,523 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:51:01,523 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:51:01,523 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:51:01,526 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:51:01,526 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:51:01,526 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:51:01,527 DEBUG [RS:2;55d0183f16d2:42313 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2098a8f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=55d0183f16d2/172.17.0.2:0 2024-12-09T03:51:01,527 DEBUG [RS:0;55d0183f16d2:36451 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c9f6004, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=55d0183f16d2/172.17.0.2:0 2024-12-09T03:51:01,527 DEBUG [RS:1;55d0183f16d2:43719 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bf1b8ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=55d0183f16d2/172.17.0.2:0 2024-12-09T03:51:01,536 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T03:51:01,543 DEBUG [RS:0;55d0183f16d2:36451 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;55d0183f16d2:36451 2024-12-09T03:51:01,546 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:51:01,546 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:51:01,546 DEBUG [RS:1;55d0183f16d2:43719 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;55d0183f16d2:43719 2024-12-09T03:51:01,547 DEBUG [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T03:51:01,547 DEBUG [RS:2;55d0183f16d2:42313 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;55d0183f16d2:42313 2024-12-09T03:51:01,547 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:51:01,547 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:51:01,547 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:51:01,547 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:51:01,547 DEBUG [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T03:51:01,547 DEBUG [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T03:51:01,549 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T03:51:01,550 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(2659): reportForDuty to master=55d0183f16d2,42121,1733716259597 with port=42313, startcode=1733716260475 2024-12-09T03:51:01,550 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(2659): reportForDuty to master=55d0183f16d2,42121,1733716259597 with port=36451, startcode=1733716260295 2024-12-09T03:51:01,550 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(2659): reportForDuty to master=55d0183f16d2,42121,1733716259597 with port=43719, startcode=1733716260418 2024-12-09T03:51:01,557 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T03:51:01,564 DEBUG [RS:2;55d0183f16d2:42313 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:51:01,564 DEBUG [RS:1;55d0183f16d2:43719 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:51:01,564 DEBUG [RS:0;55d0183f16d2:36451 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:51:01,563 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 55d0183f16d2,42121,1733716259597 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T03:51:01,571 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/55d0183f16d2:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:51:01,571 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/55d0183f16d2:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:51:01,571 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/55d0183f16d2:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:51:01,571 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/55d0183f16d2:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:51:01,571 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/55d0183f16d2:0, corePoolSize=10, maxPoolSize=10 2024-12-09T03:51:01,572 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 
2024-12-09T03:51:01,572 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/55d0183f16d2:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:51:01,572 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,577 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733716291576 2024-12-09T03:51:01,578 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T03:51:01,580 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T03:51:01,585 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T03:51:01,585 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T03:51:01,586 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T03:51:01,586 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T03:51:01,587 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T03:51:01,591 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T03:51:01,593 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T03:51:01,593 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T03:51:01,596 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T03:51:01,597 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T03:51:01,598 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:51:01,599 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T03:51:01,599 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.large.0-1733716261598,5,FailOnTimeoutGroup] 2024-12-09T03:51:01,601 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.small.0-1733716261599,5,FailOnTimeoutGroup] 2024-12-09T03:51:01,601 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,601 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T03:51:01,602 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,604 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T03:51:01,606 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46373, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:51:01,606 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53911, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:51:01,606 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49127, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:51:01,607 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:01,608 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T03:51:01,613 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42121 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 55d0183f16d2,43719,1733716260418 2024-12-09T03:51:01,616 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42121 {}] master.ServerManager(517): Registering regionserver=55d0183f16d2,43719,1733716260418 2024-12-09T03:51:01,620 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:01,620 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T03:51:01,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:35720 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:34573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35720 dst: /127.0.0.1:34573
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T03:51:01,634 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42121 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 55d0183f16d2,36451,1733716260295
2024-12-09T03:51:01,635 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42121 {}] master.ServerManager(517): Registering regionserver=55d0183f16d2,36451,1733716260295
2024-12-09T03:51:01,639 DEBUG [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68
2024-12-09T03:51:01,639 DEBUG [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46337
2024-12-09T03:51:01,640 DEBUG [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-09T03:51:01,641 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42121 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 55d0183f16d2,42313,1733716260475
2024-12-09T03:51:01,641 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42121 {}] master.ServerManager(517): Registering regionserver=55d0183f16d2,42313,1733716260475
2024-12-09T03:51:01,641 DEBUG [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68
2024-12-09T03:51:01,641 DEBUG [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46337
2024-12-09T03:51:01,642 DEBUG [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-09T03:51:01,647
DEBUG [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68 2024-12-09T03:51:01,647 DEBUG [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46337 2024-12-09T03:51:01,647 DEBUG [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:51:01,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:51:01,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-09T03:51:01,651 DEBUG [RS:0;55d0183f16d2:36451 {}] zookeeper.ZKUtil(111): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/55d0183f16d2,36451,1733716260295 2024-12-09T03:51:01,652 WARN [RS:0;55d0183f16d2:36451 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:51:01,652 INFO [RS:0;55d0183f16d2:36451 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T03:51:01,652 DEBUG [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,36451,1733716260295 2024-12-09T03:51:01,653 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T03:51:01,653 DEBUG [RS:1;55d0183f16d2:43719 {}] zookeeper.ZKUtil(111): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/55d0183f16d2,43719,1733716260418 2024-12-09T03:51:01,654 WARN [RS:1;55d0183f16d2:43719 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:51:01,654 DEBUG [RS:2;55d0183f16d2:42313 {}] zookeeper.ZKUtil(111): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/55d0183f16d2,42313,1733716260475 2024-12-09T03:51:01,654 INFO [RS:1;55d0183f16d2:43719 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T03:51:01,654 WARN [RS:2;55d0183f16d2:42313 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T03:51:01,654 INFO [RS:2;55d0183f16d2:42313 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T03:51:01,654 DEBUG [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,43719,1733716260418 2024-12-09T03:51:01,654 DEBUG [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,42313,1733716260475 2024-12-09T03:51:01,655 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T03:51:01,655 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68 2024-12-09T03:51:01,659 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [55d0183f16d2,43719,1733716260418] 2024-12-09T03:51:01,659 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [55d0183f16d2,42313,1733716260475] 2024-12-09T03:51:01,659 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [55d0183f16d2,36451,1733716260295] 2024-12-09T03:51:01,671 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T03:51:01,672 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T03:51:01,675 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:58210 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:39673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58210 dst: /127.0.0.1:39673
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T03:51:01,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775696_1015 (size=32)
2024-12-09T03:51:01,680 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T03:51:01,682 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:01,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:51:01,687 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:51:01,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:01,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:01,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:51:01,691 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:51:01,692 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:01,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:01,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:51:01,694 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:51:01,694 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:51:01,694 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:51:01,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:51:01,696 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:01,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:01,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:51:01,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:51:01,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:01,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:01,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:51:01,701 DEBUG [PEWorker-1 
{}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740 2024-12-09T03:51:01,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740 2024-12-09T03:51:01,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:51:01,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:51:01,707 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:51:01,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:51:01,713 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:51:01,713 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:51:01,713 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:51:01,720 INFO [RS:2;55d0183f16d2:42313 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:51:01,720 INFO [RS:1;55d0183f16d2:43719 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:51:01,720 INFO [RS:0;55d0183f16d2:36451 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:51:01,720 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,720 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,720 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T03:51:01,721 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:51:01,722 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:51:01,724 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70810366, jitterRate=0.05515667796134949}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:51:01,724 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:51:01,724 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:51:01,726 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733716261682Initializing all the Stores at 1733716261684 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716261684Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716261684Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716261684Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716261684Cleaning up temporary data from old regions at 1733716261706 (+22 ms)Region opened successfully at 1733716261726 (+20 ms) 2024-12-09T03:51:01,726 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:51:01,726 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:51:01,727 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:51:01,727 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:51:01,727 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:51:01,728 INFO [PEWorker-1 
{}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:51:01,729 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733716261726Disabling compacts and flushes for region at 1733716261726Disabling writes for close at 1733716261727 (+1 ms)Writing region close event to WAL at 1733716261728 (+1 ms)Closed at 1733716261728 2024-12-09T03:51:01,729 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:51:01,729 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:51:01,729 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:51:01,730 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,730 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,730 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,731 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:1;55d0183f16d2:43719 {}] 
executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,731 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/55d0183f16d2:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:51:01,732 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/55d0183f16d2:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:51:01,732 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:51:01,732 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T03:51:01,732 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/55d0183f16d2:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:51:01,732 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,732 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,733 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,733 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:01,733 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,733 DEBUG [RS:1;55d0183f16d2:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:01,733 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,733 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:01,733 DEBUG [RS:2;55d0183f16d2:42313 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:01,734 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,734 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,734 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,734 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,734 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,734 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,734 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,734 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-09T03:51:01,736 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,736 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,42313,1733716260475-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:51:01,736 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,737 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,43719,1733716260418-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:51:01,737 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,739 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T03:51:01,744 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:01,745 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:01,745 DEBUG [RS:0;55d0183f16d2:36451 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:01,745 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,746 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,746 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,746 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,746 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,746 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,36451,1733716260295-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-09T03:51:01,750 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:51:01,753 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T03:51:01,759 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:51:01,761 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,42313,1733716260475-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,761 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:51:01,761 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,761 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,43719,1733716260418-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,761 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.Replication(171): 55d0183f16d2,42313,1733716260475 started 2024-12-09T03:51:01,762 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,762 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.Replication(171): 55d0183f16d2,43719,1733716260418 started 2024-12-09T03:51:01,768 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:51:01,769 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,36451,1733716260295-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,769 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,769 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.Replication(171): 55d0183f16d2,36451,1733716260295 started 2024-12-09T03:51:01,780 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T03:51:01,781 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(1482): Serving as 55d0183f16d2,42313,1733716260475, RpcServer on 55d0183f16d2/172.17.0.2:42313, sessionid=0x10195589a9a0003 2024-12-09T03:51:01,781 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:51:01,782 DEBUG [RS:2;55d0183f16d2:42313 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 55d0183f16d2,42313,1733716260475 2024-12-09T03:51:01,782 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,42313,1733716260475' 2024-12-09T03:51:01,782 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:51:01,783 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,783 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(1482): Serving as 55d0183f16d2,43719,1733716260418, RpcServer on 55d0183f16d2/172.17.0.2:43719, sessionid=0x10195589a9a0002 2024-12-09T03:51:01,783 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:51:01,783 DEBUG [RS:1;55d0183f16d2:43719 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 55d0183f16d2,43719,1733716260418 2024-12-09T03:51:01,783 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,43719,1733716260418' 2024-12-09T03:51:01,783 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:51:01,783 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:51:01,784 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:51:01,784 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:51:01,784 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:51:01,784 DEBUG [RS:2;55d0183f16d2:42313 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 55d0183f16d2,42313,1733716260475 2024-12-09T03:51:01,784 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,42313,1733716260475' 2024-12-09T03:51:01,784 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:51:01,785 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:51:01,785 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:51:01,785 DEBUG [RS:1;55d0183f16d2:43719 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 55d0183f16d2,43719,1733716260418 2024-12-09T03:51:01,785 DEBUG [RS:1;55d0183f16d2:43719 {}] 
procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,43719,1733716260418' 2024-12-09T03:51:01,785 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:51:01,785 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:51:01,786 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:51:01,786 DEBUG [RS:2;55d0183f16d2:42313 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:51:01,786 INFO [RS:2;55d0183f16d2:42313 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:51:01,786 INFO [RS:2;55d0183f16d2:42313 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:51:01,786 DEBUG [RS:1;55d0183f16d2:43719 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:51:01,786 INFO [RS:1;55d0183f16d2:43719 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:51:01,786 INFO [RS:1;55d0183f16d2:43719 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:51:01,793 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:01,794 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(1482): Serving as 55d0183f16d2,36451,1733716260295, RpcServer on 55d0183f16d2/172.17.0.2:36451, sessionid=0x10195589a9a0001 2024-12-09T03:51:01,794 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:51:01,794 DEBUG [RS:0;55d0183f16d2:36451 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 55d0183f16d2,36451,1733716260295 2024-12-09T03:51:01,794 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,36451,1733716260295' 2024-12-09T03:51:01,794 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:51:01,795 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:51:01,796 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:51:01,796 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:51:01,796 DEBUG [RS:0;55d0183f16d2:36451 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 55d0183f16d2,36451,1733716260295 2024-12-09T03:51:01,796 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,36451,1733716260295' 2024-12-09T03:51:01,796 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:51:01,797 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new 
procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:51:01,797 DEBUG [RS:0;55d0183f16d2:36451 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:51:01,797 INFO [RS:0;55d0183f16d2:36451 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:51:01,797 INFO [RS:0;55d0183f16d2:36451 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:51:01,892 INFO [RS:1;55d0183f16d2:43719 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T03:51:01,892 INFO [RS:2;55d0183f16d2:42313 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T03:51:01,895 INFO [RS:1;55d0183f16d2:43719 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C43719%2C1733716260418, suffix=, logDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,43719,1733716260418, archiveDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/oldWALs, maxLogs=32 2024-12-09T03:51:01,895 INFO [RS:2;55d0183f16d2:42313 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C42313%2C1733716260475, suffix=, logDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,42313,1733716260475, archiveDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/oldWALs, maxLogs=32 2024-12-09T03:51:01,898 INFO [RS:0;55d0183f16d2:36451 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T03:51:01,901 INFO [RS:0;55d0183f16d2:36451 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C36451%2C1733716260295, suffix=, logDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,36451,1733716260295, archiveDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/oldWALs, maxLogs=32 2024-12-09T03:51:01,904 WARN [55d0183f16d2:42121 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-09T03:51:01,914 DEBUG [RS:1;55d0183f16d2:43719 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,43719,1733716260418/55d0183f16d2%2C43719%2C1733716260418.1733716261900, exclude list is [], retry=0 2024-12-09T03:51:01,914 DEBUG [RS:2;55d0183f16d2:42313 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,42313,1733716260475/55d0183f16d2%2C42313%2C1733716260475.1733716261900, exclude list is [], retry=0 2024-12-09T03:51:01,919 DEBUG [RS:0;55d0183f16d2:36451 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,36451,1733716260295/55d0183f16d2%2C36451%2C1733716260295.1733716261902, exclude list is [], retry=0 2024-12-09T03:51:01,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34573,DS-d05c67c4-8e69-4655-8cf5-cac201faae14,DISK] 2024-12-09T03:51:01,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39673,DS-6209cff2-830a-4fbb-ae94-aac7695fb660,DISK] 2024-12-09T03:51:01,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34573,DS-d05c67c4-8e69-4655-8cf5-cac201faae14,DISK] 2024-12-09T03:51:01,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42491,DS-08b1bb43-3dfa-4495-8166-03c74cf62826,DISK] 2024-12-09T03:51:01,958 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42491,DS-08b1bb43-3dfa-4495-8166-03c74cf62826,DISK] 2024-12-09T03:51:01,958 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39673,DS-6209cff2-830a-4fbb-ae94-aac7695fb660,DISK] 2024-12-09T03:51:01,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34573,DS-d05c67c4-8e69-4655-8cf5-cac201faae14,DISK] 2024-12-09T03:51:01,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42491,DS-08b1bb43-3dfa-4495-8166-03c74cf62826,DISK] 
2024-12-09T03:51:01,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39673,DS-6209cff2-830a-4fbb-ae94-aac7695fb660,DISK] 2024-12-09T03:51:01,976 INFO [RS:1;55d0183f16d2:43719 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,43719,1733716260418/55d0183f16d2%2C43719%2C1733716260418.1733716261900 2024-12-09T03:51:01,976 INFO [RS:2;55d0183f16d2:42313 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,42313,1733716260475/55d0183f16d2%2C42313%2C1733716260475.1733716261900 2024-12-09T03:51:01,978 INFO [RS:0;55d0183f16d2:36451 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,36451,1733716260295/55d0183f16d2%2C36451%2C1733716260295.1733716261902 2024-12-09T03:51:01,978 DEBUG [RS:1;55d0183f16d2:43719 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42077:42077),(127.0.0.1/127.0.0.1:38845:38845),(127.0.0.1/127.0.0.1:43637:43637)] 2024-12-09T03:51:01,980 DEBUG [RS:2;55d0183f16d2:42313 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38845:38845),(127.0.0.1/127.0.0.1:42077:42077),(127.0.0.1/127.0.0.1:43637:43637)] 2024-12-09T03:51:01,980 DEBUG [RS:0;55d0183f16d2:36451 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38845:38845),(127.0.0.1/127.0.0.1:42077:42077),(127.0.0.1/127.0.0.1:43637:43637)] 2024-12-09T03:51:02,156 DEBUG [55d0183f16d2:42121 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T03:51:02,164 DEBUG [55d0183f16d2:42121 {}] balancer.BalancerClusterState(204): Hosts are {55d0183f16d2=0} racks are {/default-rack=0} 2024-12-09T03:51:02,171 DEBUG [55d0183f16d2:42121 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T03:51:02,171 DEBUG [55d0183f16d2:42121 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T03:51:02,171 DEBUG [55d0183f16d2:42121 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T03:51:02,172 DEBUG [55d0183f16d2:42121 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T03:51:02,172 DEBUG [55d0183f16d2:42121 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T03:51:02,172 DEBUG [55d0183f16d2:42121 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T03:51:02,172 INFO [55d0183f16d2:42121 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T03:51:02,172 INFO [55d0183f16d2:42121 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T03:51:02,172 INFO [55d0183f16d2:42121 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T03:51:02,172 DEBUG [55d0183f16d2:42121 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T03:51:02,180 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=55d0183f16d2,36451,1733716260295 2024-12-09T03:51:02,186 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 55d0183f16d2,36451,1733716260295, 
state=OPENING 2024-12-09T03:51:02,191 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T03:51:02,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:02,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:02,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:02,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:02,194 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:02,194 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:02,194 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:02,194 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:02,195 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:51:02,197 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=55d0183f16d2,36451,1733716260295}] 2024-12-09T03:51:02,374 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:51:02,377 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39511, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:51:02,390 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T03:51:02,390 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T03:51:02,391 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T03:51:02,394 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C36451%2C1733716260295.meta, suffix=.meta, 
logDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,36451,1733716260295, archiveDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/oldWALs, maxLogs=32 2024-12-09T03:51:02,409 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,36451,1733716260295/55d0183f16d2%2C36451%2C1733716260295.meta.1733716262395.meta, exclude list is [], retry=0 2024-12-09T03:51:02,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39673,DS-6209cff2-830a-4fbb-ae94-aac7695fb660,DISK] 2024-12-09T03:51:02,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42491,DS-08b1bb43-3dfa-4495-8166-03c74cf62826,DISK] 2024-12-09T03:51:02,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34573,DS-d05c67c4-8e69-4655-8cf5-cac201faae14,DISK] 2024-12-09T03:51:02,417 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/WALs/55d0183f16d2,36451,1733716260295/55d0183f16d2%2C36451%2C1733716260295.meta.1733716262395.meta 2024-12-09T03:51:02,418 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42077:42077),(127.0.0.1/127.0.0.1:43637:43637),(127.0.0.1/127.0.0.1:38845:38845)] 2024-12-09T03:51:02,418 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:51:02,420 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T03:51:02,422 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T03:51:02,428 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T03:51:02,432 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T03:51:02,433 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:02,433 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T03:51:02,433 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T03:51:02,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:51:02,439 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:51:02,439 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:02,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:02,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:51:02,441 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:51:02,442 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:02,442 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:02,442 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:51:02,444 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:51:02,444 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:02,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:02,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:51:02,446 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:51:02,446 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:02,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T03:51:02,447 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:51:02,449 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740 2024-12-09T03:51:02,451 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740 2024-12-09T03:51:02,454 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:51:02,454 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:51:02,455 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:51:02,458 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:51:02,460 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62237433, jitterRate=-0.07258997857570648}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:51:02,460 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T03:51:02,463 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733716262434Writing region info on filesystem at 1733716262434Initializing all the Stores at 1733716262437 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716262437Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716262437Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716262437Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716262437Cleaning up temporary data from old regions at 1733716262454 (+17 ms)Running coprocessor post-open hooks at 1733716262460 (+6 ms)Region opened successfully at 1733716262463 (+3 ms) 2024-12-09T03:51:02,471 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733716262364 2024-12-09T03:51:02,512 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T03:51:02,512 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T03:51:02,515 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=55d0183f16d2,36451,1733716260295 2024-12-09T03:51:02,518 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 55d0183f16d2,36451,1733716260295, state=OPEN 2024-12-09T03:51:02,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:51:02,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:51:02,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:51:02,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:51:02,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:02,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:02,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:02,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:02,523 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=55d0183f16d2,36451,1733716260295 2024-12-09T03:51:02,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T03:51:02,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=55d0183f16d2,36451,1733716260295 in 326 msec 2024-12-09T03:51:02,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T03:51:02,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 796 msec 2024-12-09T03:51:02,545 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:51:02,545 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T03:51:02,565 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:51:02,566 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=55d0183f16d2,36451,1733716260295, seqNum=-1] 2024-12-09T03:51:02,586 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:51:02,588 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47301, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:51:02,605 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1210 sec 2024-12-09T03:51:02,606 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733716262605, completionTime=-1 2024-12-09T03:51:02,608 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T03:51:02,608 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-09T03:51:02,635 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T03:51:02,635 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733716322635 2024-12-09T03:51:02,635 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733716382635 2024-12-09T03:51:02,636 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-09T03:51:02,637 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T03:51:02,643 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,42121,1733716259597-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:02,644 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,42121,1733716259597-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:02,644 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,42121,1733716259597-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:02,645 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-55d0183f16d2:42121, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:02,646 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:02,646 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:02,654 DEBUG [master/55d0183f16d2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T03:51:02,676 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.104sec 2024-12-09T03:51:02,678 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T03:51:02,679 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T03:51:02,680 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T03:51:02,680 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T03:51:02,681 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T03:51:02,681 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,42121,1733716259597-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:51:02,682 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,42121,1733716259597-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T03:51:02,686 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T03:51:02,687 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T03:51:02,687 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,42121,1733716259597-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:02,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72d63dcf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:51:02,728 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T03:51:02,728 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T03:51:02,732 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 55d0183f16d2,42121,-1 for getting cluster id 2024-12-09T03:51:02,734 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T03:51:02,742 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e9914944-b583-4516-833d-8439e5208c15' 2024-12-09T03:51:02,745 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T03:51:02,745 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e9914944-b583-4516-833d-8439e5208c15" 2024-12-09T03:51:02,745 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb2d562, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:51:02,745 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [55d0183f16d2,42121,-1] 2024-12-09T03:51:02,748 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T03:51:02,750 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:02,751 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56430, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-09T03:51:02,754 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f46cc90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:51:02,754 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:51:02,761 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=55d0183f16d2,36451,1733716260295, seqNum=-1] 2024-12-09T03:51:02,761 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:51:02,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55470, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:51:02,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=55d0183f16d2,42121,1733716259597 2024-12-09T03:51:02,787 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T03:51:02,791 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 55d0183f16d2,42121,1733716259597 2024-12-09T03:51:02,793 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@57271115 2024-12-09T03:51:02,794 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T03:51:02,796 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56432, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T03:51:02,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:51:02,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T03:51:02,810 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T03:51:02,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T03:51:02,813 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:02,816 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T03:51:02,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:51:02,827 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:02,827 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:02,830 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:58284 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:39673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58284 dst: /127.0.0.1:39673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:02,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-09T03:51:02,841 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T03:51:02,844 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0790325eb88375d7ba7848eeb703e235, NAME => 'TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68 2024-12-09T03:51:02,850 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:02,850 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:02,854 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:35770 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:34573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35770 dst: /127.0.0.1:34573 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:02,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-09T03:51:02,861 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T03:51:02,861 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:02,861 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 0790325eb88375d7ba7848eeb703e235, disabling compactions & flushes 2024-12-09T03:51:02,861 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:02,862 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:02,862 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. after waiting 0 ms 2024-12-09T03:51:02,862 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:02,862 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:02,862 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0790325eb88375d7ba7848eeb703e235: Waiting for close lock at 1733716262861Disabling compacts and flushes for region at 1733716262861Disabling writes for close at 1733716262862 (+1 ms)Writing region close event to WAL at 1733716262862Closed at 1733716262862 2024-12-09T03:51:02,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T03:51:02,871 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733716262865"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733716262865"}]},"ts":"1733716262865"} 2024-12-09T03:51:02,877 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T03:51:02,879 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T03:51:02,882 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733716262879"}]},"ts":"1733716262879"} 2024-12-09T03:51:02,886 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T03:51:02,887 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {55d0183f16d2=0} racks are {/default-rack=0} 2024-12-09T03:51:02,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T03:51:02,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T03:51:02,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T03:51:02,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T03:51:02,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T03:51:02,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T03:51:02,888 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T03:51:02,888 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T03:51:02,888 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T03:51:02,888 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T03:51:02,889 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0790325eb88375d7ba7848eeb703e235, ASSIGN}] 2024-12-09T03:51:02,891 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0790325eb88375d7ba7848eeb703e235, ASSIGN 2024-12-09T03:51:02,893 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0790325eb88375d7ba7848eeb703e235, ASSIGN; state=OFFLINE, location=55d0183f16d2,42313,1733716260475; forceNewPlan=false, retain=false 2024-12-09T03:51:02,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:51:03,046 INFO [55d0183f16d2:42121 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-09T03:51:03,047 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0790325eb88375d7ba7848eeb703e235, regionState=OPENING, regionLocation=55d0183f16d2,42313,1733716260475 2024-12-09T03:51:03,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0790325eb88375d7ba7848eeb703e235, ASSIGN because future has completed 2024-12-09T03:51:03,052 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0790325eb88375d7ba7848eeb703e235, server=55d0183f16d2,42313,1733716260475}] 2024-12-09T03:51:03,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:51:03,206 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:51:03,209 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55935, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:51:03,215 INFO [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:03,216 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0790325eb88375d7ba7848eeb703e235, NAME => 'TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:51:03,216 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,216 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:03,216 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,216 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,218 INFO [StoreOpener-0790325eb88375d7ba7848eeb703e235-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,221 INFO [StoreOpener-0790325eb88375d7ba7848eeb703e235-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0790325eb88375d7ba7848eeb703e235 columnFamilyName cf 2024-12-09T03:51:03,221 DEBUG [StoreOpener-0790325eb88375d7ba7848eeb703e235-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:03,222 INFO [StoreOpener-0790325eb88375d7ba7848eeb703e235-1 {}] regionserver.HStore(327): Store=0790325eb88375d7ba7848eeb703e235/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:51:03,222 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,223 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/default/TestHBaseWalOnEC/0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,224 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/default/TestHBaseWalOnEC/0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,224 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,224 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,227 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,233 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/default/TestHBaseWalOnEC/0790325eb88375d7ba7848eeb703e235/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:51:03,234 INFO [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0790325eb88375d7ba7848eeb703e235; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61141915, jitterRate=-0.08891446888446808}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T03:51:03,234 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:03,236 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0790325eb88375d7ba7848eeb703e235: Running coprocessor pre-open hook at 1733716263217Writing region info on filesystem at 1733716263217Initializing all the Stores at 1733716263218 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716263218Cleaning up temporary data from old regions at 1733716263224 (+6 ms)Running coprocessor post-open hooks at 1733716263234 (+10 ms)Region opened successfully at 1733716263236 (+2 ms) 2024-12-09T03:51:03,238 INFO [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235., pid=6, masterSystemTime=1733716263206 2024-12-09T03:51:03,241 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:03,241 INFO [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:03,242 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0790325eb88375d7ba7848eeb703e235, regionState=OPEN, openSeqNum=2, regionLocation=55d0183f16d2,42313,1733716260475 2024-12-09T03:51:03,246 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0790325eb88375d7ba7848eeb703e235, server=55d0183f16d2,42313,1733716260475 because future has completed 2024-12-09T03:51:03,252 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T03:51:03,252 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0790325eb88375d7ba7848eeb703e235, server=55d0183f16d2,42313,1733716260475 in 196 msec 2024-12-09T03:51:03,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T03:51:03,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0790325eb88375d7ba7848eeb703e235, ASSIGN in 363 msec 2024-12-09T03:51:03,257 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T03:51:03,258 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733716263257"}]},"ts":"1733716263257"} 2024-12-09T03:51:03,260 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T03:51:03,262 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T03:51:03,265 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 459 msec 2024-12-09T03:51:03,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:51:03,449 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T03:51:03,449 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T03:51:03,450 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T03:51:03,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T03:51:03,455 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T03:51:03,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-09T03:51:03,463 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235., hostname=55d0183f16d2,42313,1733716260475, seqNum=2] 2024-12-09T03:51:03,464 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:51:03,466 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41168, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:51:03,474 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T03:51:03,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T03:51:03,482 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T03:51:03,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T03:51:03,484 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T03:51:03,485 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T03:51:03,588 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T03:51:03,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42313 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T03:51:03,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:03,651 INFO [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 0790325eb88375d7ba7848eeb703e235 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T03:51:03,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/default/TestHBaseWalOnEC/0790325eb88375d7ba7848eeb703e235/.tmp/cf/50d47dc21d474ed591ec1c858f7d1e7a is 36, key is row/cf:cq/1733716263467/Put/seqid=0 2024-12-09T03:51:03,722 WARN [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:03,722 WARN [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:03,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_309570688_22 at /127.0.0.1:35784 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35784 dst: /127.0.0.1:34573 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:03,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-09T03:51:03,731 WARN [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T03:51:03,731 INFO [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/default/TestHBaseWalOnEC/0790325eb88375d7ba7848eeb703e235/.tmp/cf/50d47dc21d474ed591ec1c858f7d1e7a 2024-12-09T03:51:03,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/default/TestHBaseWalOnEC/0790325eb88375d7ba7848eeb703e235/.tmp/cf/50d47dc21d474ed591ec1c858f7d1e7a as hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/default/TestHBaseWalOnEC/0790325eb88375d7ba7848eeb703e235/cf/50d47dc21d474ed591ec1c858f7d1e7a 2024-12-09T03:51:03,790 INFO [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/default/TestHBaseWalOnEC/0790325eb88375d7ba7848eeb703e235/cf/50d47dc21d474ed591ec1c858f7d1e7a, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T03:51:03,797 INFO [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 0790325eb88375d7ba7848eeb703e235 in 146ms, sequenceid=5, compaction requested=false 2024-12-09T03:51:03,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-09T03:51:03,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T03:51:03,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 0790325eb88375d7ba7848eeb703e235: 2024-12-09T03:51:03,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 
2024-12-09T03:51:03,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T03:51:03,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T03:51:03,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T03:51:03,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 320 msec 2024-12-09T03:51:03,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 335 msec 2024-12-09T03:51:04,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T03:51:04,109 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T03:51:04,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T03:51:04,123 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T03:51:04,123 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:04,128 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:04,128 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:04,128 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T03:51:04,129 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T03:51:04,129 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=875678016, stopped=false 2024-12-09T03:51:04,129 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=55d0183f16d2,42121,1733716259597 2024-12-09T03:51:04,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:04,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:04,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:04,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:04,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:04,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, 
quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:04,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:04,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:04,131 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:51:04,132 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:04,132 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:04,132 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:04,133 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T03:51:04,133 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:04,133 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:04,133 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:04,133 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '55d0183f16d2,36451,1733716260295' ***** 2024-12-09T03:51:04,134 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:51:04,134 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '55d0183f16d2,43719,1733716260418' ***** 2024-12-09T03:51:04,134 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:51:04,134 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '55d0183f16d2,42313,1733716260475' ***** 2024-12-09T03:51:04,134 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:51:04,134 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:51:04,134 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:51:04,134 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:51:04,134 INFO [RS:1;55d0183f16d2:43719 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:51:04,134 INFO [RS:2;55d0183f16d2:42313 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-09T03:51:04,134 INFO [RS:0;55d0183f16d2:36451 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:51:04,134 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:51:04,135 INFO [RS:2;55d0183f16d2:42313 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:51:04,135 INFO [RS:1;55d0183f16d2:43719 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:51:04,135 INFO [RS:0;55d0183f16d2:36451 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:51:04,135 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:51:04,135 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(959): stopping server 55d0183f16d2,43719,1733716260418 2024-12-09T03:51:04,135 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(959): stopping server 55d0183f16d2,36451,1733716260295 2024-12-09T03:51:04,135 INFO [RS:1;55d0183f16d2:43719 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:51:04,135 INFO [RS:0;55d0183f16d2:36451 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:51:04,135 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:51:04,135 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(3091): Received CLOSE for 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:04,135 INFO [RS:1;55d0183f16d2:43719 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;55d0183f16d2:43719. 2024-12-09T03:51:04,135 INFO [RS:0;55d0183f16d2:36451 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;55d0183f16d2:36451. 
2024-12-09T03:51:04,135 DEBUG [RS:1;55d0183f16d2:43719 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:04,135 DEBUG [RS:0;55d0183f16d2:36451 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:04,135 DEBUG [RS:1;55d0183f16d2:43719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:04,135 DEBUG [RS:0;55d0183f16d2:36451 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:04,135 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(976): stopping server 55d0183f16d2,43719,1733716260418; all regions closed. 2024-12-09T03:51:04,135 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-09T03:51:04,135 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(959): stopping server 55d0183f16d2,42313,1733716260475 2024-12-09T03:51:04,136 INFO [RS:2;55d0183f16d2:42313 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:51:04,136 INFO [RS:2;55d0183f16d2:42313 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;55d0183f16d2:42313. 2024-12-09T03:51:04,136 DEBUG [RS:2;55d0183f16d2:42313 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:04,136 DEBUG [RS:2;55d0183f16d2:42313 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:04,136 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T03:51:04,136 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:51:04,136 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0790325eb88375d7ba7848eeb703e235, disabling compactions & flushes 2024-12-09T03:51:04,136 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:51:04,136 DEBUG [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(1325): Online Regions={0790325eb88375d7ba7848eeb703e235=TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235.} 2024-12-09T03:51:04,136 INFO [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:04,136 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T03:51:04,136 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:04,136 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 
after waiting 0 ms 2024-12-09T03:51:04,136 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:04,137 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T03:51:04,137 DEBUG [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T03:51:04,137 DEBUG [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(1351): Waiting on 0790325eb88375d7ba7848eeb703e235 2024-12-09T03:51:04,137 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:51:04,137 DEBUG [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T03:51:04,137 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:51:04,137 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:51:04,137 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:51:04,137 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:51:04,138 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T03:51:04,140 INFO [regionserver/55d0183f16d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:04,142 INFO [regionserver/55d0183f16d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_1073741826_1016 (size=93) 2024-12-09T03:51:04,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741826_1016 (size=93) 2024-12-09T03:51:04,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_1073741826_1016 (size=93) 2024-12-09T03:51:04,148 INFO [regionserver/55d0183f16d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:04,150 DEBUG [RS:1;55d0183f16d2:43719 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/oldWALs 2024-12-09T03:51:04,150 INFO [RS:1;55d0183f16d2:43719 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 55d0183f16d2%2C43719%2C1733716260418:(num 1733716261900) 2024-12-09T03:51:04,151 DEBUG [RS:1;55d0183f16d2:43719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:04,151 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:04,151 INFO [RS:1;55d0183f16d2:43719 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:51:04,151 INFO [RS:1;55d0183f16d2:43719 {}] hbase.ChoreService(370): 
Chore service for: regionserver/55d0183f16d2:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:51:04,151 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:51:04,151 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:51:04,151 INFO [regionserver/55d0183f16d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:51:04,151 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:51:04,151 INFO [RS:1;55d0183f16d2:43719 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:51:04,152 INFO [RS:1;55d0183f16d2:43719 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43719 2024-12-09T03:51:04,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/55d0183f16d2,43719,1733716260418 2024-12-09T03:51:04,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:51:04,156 INFO [RS:1;55d0183f16d2:43719 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:51:04,158 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [55d0183f16d2,43719,1733716260418] 2024-12-09T03:51:04,161 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/55d0183f16d2,43719,1733716260418 already deleted, retry=false 2024-12-09T03:51:04,161 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 55d0183f16d2,43719,1733716260418 expired; onlineServers=2 2024-12-09T03:51:04,166 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/default/TestHBaseWalOnEC/0790325eb88375d7ba7848eeb703e235/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T03:51:04,168 INFO [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 2024-12-09T03:51:04,169 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0790325eb88375d7ba7848eeb703e235: Waiting for close lock at 1733716264136Running coprocessor pre-close hooks at 1733716264136Disabling compacts and flushes for region at 1733716264136Disabling writes for close at 1733716264136Writing region close event to WAL at 1733716264138 (+2 ms)Running coprocessor post-close hooks at 1733716264167 (+29 ms)Closed at 1733716264168 (+1 ms) 2024-12-09T03:51:04,169 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235. 
2024-12-09T03:51:04,177 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/.tmp/info/392833c577b848adaa7f402985eef649 is 153, key is TestHBaseWalOnEC,,1733716262797.0790325eb88375d7ba7848eeb703e235./info:regioninfo/1733716263242/Put/seqid=0 2024-12-09T03:51:04,181 WARN [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,181 WARN [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-351410883_22 at /127.0.0.1:58304 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58304 dst: /127.0.0.1:39673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:04,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-09T03:51:04,190 WARN [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T03:51:04,190 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/.tmp/info/392833c577b848adaa7f402985eef649 2024-12-09T03:51:04,219 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/.tmp/ns/34b5cfe013cf4b1a89b2a2edf4879a29 is 43, key is default/ns:d/1733716262592/Put/seqid=0 2024-12-09T03:51:04,221 WARN [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,221 WARN [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,224 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-351410883_22 at /127.0.0.1:35804 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:34573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35804 dst: /127.0.0.1:34573 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:04,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-09T03:51:04,230 WARN [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T03:51:04,230 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/.tmp/ns/34b5cfe013cf4b1a89b2a2edf4879a29 2024-12-09T03:51:04,256 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/.tmp/table/8fc21505807546c6a1e06d443146110d is 52, key is TestHBaseWalOnEC/table:state/1733716263257/Put/seqid=0 2024-12-09T03:51:04,258 WARN [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,258 WARN [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:04,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x10195589a9a0002, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:04,259 INFO [RS:1;55d0183f16d2:43719 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:51:04,259 INFO [RS:1;55d0183f16d2:43719 {}] regionserver.HRegionServer(1031): Exiting; stopping=55d0183f16d2,43719,1733716260418; zookeeper connection closed. 2024-12-09T03:51:04,259 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@483ba03f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@483ba03f 2024-12-09T03:51:04,261 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-351410883_22 at /127.0.0.1:41824 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:42491:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41824 dst: /127.0.0.1:42491 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:04,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-09T03:51:04,266 WARN [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T03:51:04,266 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/.tmp/table/8fc21505807546c6a1e06d443146110d 2024-12-09T03:51:04,276 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/.tmp/info/392833c577b848adaa7f402985eef649 as hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/info/392833c577b848adaa7f402985eef649 2024-12-09T03:51:04,285 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/info/392833c577b848adaa7f402985eef649, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T03:51:04,286 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/.tmp/ns/34b5cfe013cf4b1a89b2a2edf4879a29 as hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/ns/34b5cfe013cf4b1a89b2a2edf4879a29 2024-12-09T03:51:04,295 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/ns/34b5cfe013cf4b1a89b2a2edf4879a29, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T03:51:04,296 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/.tmp/table/8fc21505807546c6a1e06d443146110d as hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/table/8fc21505807546c6a1e06d443146110d 2024-12-09T03:51:04,304 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/table/8fc21505807546c6a1e06d443146110d, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T03:51:04,305 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 168ms, sequenceid=11, compaction requested=false 2024-12-09T03:51:04,305 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T03:51:04,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-09T03:51:04,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-09T03:51:04,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-09T03:51:04,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-09T03:51:04,327 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T03:51:04,328 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:51:04,328 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:51:04,329 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733716264137Running coprocessor pre-close hooks at 1733716264137Disabling compacts and flushes for region at 1733716264137Disabling writes for close at 1733716264137Obtaining lock to block concurrent updates at 1733716264138 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733716264138Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733716264139 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733716264140 (+1 ms)Flushing 1588230740/info: creating writer at 1733716264140Flushing 1588230740/info: appending metadata at 1733716264173 (+33 ms)Flushing 1588230740/info: closing flushed file at 1733716264173Flushing 1588230740/ns: creating writer at 1733716264202 (+29 ms)Flushing 1588230740/ns: appending metadata at 1733716264218 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733716264218Flushing 1588230740/table: creating writer at 1733716264239 (+21 ms)Flushing 1588230740/table: appending metadata at 1733716264255 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733716264255Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ea7d85: reopening flushed file at 1733716264275 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f568a0d: reopening flushed file at 1733716264285 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47d2f9cd: reopening flushed file at 1733716264295 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 168ms, sequenceid=11, compaction requested=false at 1733716264305 (+10 ms)Writing region close event to WAL at 1733716264307 (+2 ms)Running coprocessor post-close hooks at 1733716264328 (+21 ms)Closed at 1733716264328 2024-12-09T03:51:04,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-09T03:51:04,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-09T03:51:04,329 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T03:51:04,337 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(976): stopping server 55d0183f16d2,42313,1733716260475; all regions closed. 2024-12-09T03:51:04,337 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(976): stopping server 55d0183f16d2,36451,1733716260295; all regions closed. 2024-12-09T03:51:04,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741827_1017 (size=1298) 2024-12-09T03:51:04,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_1073741829_1019 (size=2751) 2024-12-09T03:51:04,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_1073741829_1019 (size=2751) 2024-12-09T03:51:04,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741829_1019 (size=2751) 2024-12-09T03:51:04,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_1073741827_1017 (size=1298) 2024-12-09T03:51:04,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_1073741827_1017 (size=1298) 2024-12-09T03:51:04,345 DEBUG [RS:0;55d0183f16d2:36451 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/oldWALs 2024-12-09T03:51:04,345 INFO [RS:0;55d0183f16d2:36451 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 55d0183f16d2%2C36451%2C1733716260295.meta:.meta(num 1733716262395) 2024-12-09T03:51:04,345 DEBUG [RS:2;55d0183f16d2:42313 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/oldWALs 2024-12-09T03:51:04,345 INFO [RS:2;55d0183f16d2:42313 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 55d0183f16d2%2C42313%2C1733716260475:(num 1733716261900) 2024-12-09T03:51:04,345 DEBUG [RS:2;55d0183f16d2:42313 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:04,345 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:04,345 INFO 
[RS:2;55d0183f16d2:42313 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:51:04,346 INFO [RS:2;55d0183f16d2:42313 {}] hbase.ChoreService(370): Chore service for: regionserver/55d0183f16d2:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:51:04,346 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:51:04,346 INFO [regionserver/55d0183f16d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:51:04,346 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:51:04,346 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:51:04,346 INFO [RS:2;55d0183f16d2:42313 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:51:04,346 INFO [RS:2;55d0183f16d2:42313 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42313 2024-12-09T03:51:04,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_1073741828_1018 (size=93) 2024-12-09T03:51:04,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741828_1018 (size=93) 2024-12-09T03:51:04,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:51:04,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/55d0183f16d2,42313,1733716260475 2024-12-09T03:51:04,349 INFO [RS:2;55d0183f16d2:42313 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:51:04,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_1073741828_1018 (size=93) 2024-12-09T03:51:04,351 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [55d0183f16d2,42313,1733716260475] 2024-12-09T03:51:04,352 DEBUG [RS:0;55d0183f16d2:36451 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/oldWALs 2024-12-09T03:51:04,352 INFO [RS:0;55d0183f16d2:36451 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 55d0183f16d2%2C36451%2C1733716260295:(num 1733716261902) 2024-12-09T03:51:04,352 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/55d0183f16d2,42313,1733716260475 already deleted, retry=false 2024-12-09T03:51:04,352 DEBUG [RS:0;55d0183f16d2:36451 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:04,352 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:04,352 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 55d0183f16d2,42313,1733716260475 expired; onlineServers=1 2024-12-09T03:51:04,352 INFO [RS:0;55d0183f16d2:36451 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:51:04,352 INFO [RS:0;55d0183f16d2:36451 {}] hbase.ChoreService(370): Chore service for: regionserver/55d0183f16d2:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T03:51:04,352 INFO [RS:0;55d0183f16d2:36451 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:51:04,353 INFO [regionserver/55d0183f16d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:51:04,353 INFO [RS:0;55d0183f16d2:36451 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36451 2024-12-09T03:51:04,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:51:04,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/55d0183f16d2,36451,1733716260295 2024-12-09T03:51:04,356 INFO [RS:0;55d0183f16d2:36451 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:51:04,358 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [55d0183f16d2,36451,1733716260295] 2024-12-09T03:51:04,359 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/55d0183f16d2,36451,1733716260295 already deleted, retry=false 2024-12-09T03:51:04,359 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 55d0183f16d2,36451,1733716260295 expired; onlineServers=0 2024-12-09T03:51:04,359 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '55d0183f16d2,42121,1733716259597' ***** 2024-12-09T03:51:04,359 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T03:51:04,359 INFO [M:0;55d0183f16d2:42121 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:51:04,359 INFO [M:0;55d0183f16d2:42121 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:51:04,359 DEBUG [M:0;55d0183f16d2:42121 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T03:51:04,360 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T03:51:04,360 DEBUG [M:0;55d0183f16d2:42121 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T03:51:04,360 DEBUG [master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.small.0-1733716261599 {}] cleaner.HFileCleaner(306): Exit Thread[master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.small.0-1733716261599,5,FailOnTimeoutGroup] 2024-12-09T03:51:04,360 DEBUG [master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.large.0-1733716261598 {}] cleaner.HFileCleaner(306): Exit Thread[master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.large.0-1733716261598,5,FailOnTimeoutGroup] 2024-12-09T03:51:04,360 INFO [M:0;55d0183f16d2:42121 {}] hbase.ChoreService(370): Chore service for: master/55d0183f16d2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T03:51:04,360 INFO [M:0;55d0183f16d2:42121 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:51:04,360 DEBUG [M:0;55d0183f16d2:42121 {}] master.HMaster(1795): Stopping service threads 2024-12-09T03:51:04,360 INFO [M:0;55d0183f16d2:42121 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T03:51:04,360 INFO [M:0;55d0183f16d2:42121 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:51:04,361 INFO [M:0;55d0183f16d2:42121 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T03:51:04,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T03:51:04,361 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T03:51:04,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:04,362 DEBUG [M:0;55d0183f16d2:42121 {}] zookeeper.ZKUtil(347): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T03:51:04,362 WARN [M:0;55d0183f16d2:42121 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T03:51:04,363 INFO [M:0;55d0183f16d2:42121 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/.lastflushedseqids 2024-12-09T03:51:04,372 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,372 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T03:51:04,374 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:58342 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58342 dst: /127.0.0.1:39673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:04,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-09T03:51:04,379 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T03:51:04,379 INFO [M:0;55d0183f16d2:42121 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T03:51:04,379 INFO [M:0;55d0183f16d2:42121 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T03:51:04,379 DEBUG [M:0;55d0183f16d2:42121 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:51:04,379 INFO [M:0;55d0183f16d2:42121 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:04,379 DEBUG [M:0;55d0183f16d2:42121 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:04,379 DEBUG [M:0;55d0183f16d2:42121 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:51:04,379 DEBUG [M:0;55d0183f16d2:42121 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:51:04,379 INFO [M:0;55d0183f16d2:42121 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-09T03:51:04,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-09T03:51:04,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-09T03:51:04,398 DEBUG [M:0;55d0183f16d2:42121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a35d309cf59c4dd8a1047f9e5c3bcb63 is 82, key is hbase:meta,,1/info:regioninfo/1733716262515/Put/seqid=0 2024-12-09T03:51:04,400 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,400 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:58380 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:39673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58380 dst: /127.0.0.1:39673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:51:04,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-09T03:51:04,407 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T03:51:04,407 INFO [M:0;55d0183f16d2:42121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a35d309cf59c4dd8a1047f9e5c3bcb63 2024-12-09T03:51:04,431 DEBUG [M:0;55d0183f16d2:42121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b834eecd73b242afb798e7b1ad5e3fb3 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733716263264/Put/seqid=0 2024-12-09T03:51:04,433 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,433 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:41878 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:42491:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41878 dst: /127.0.0.1:42491 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:51:04,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_-9223372036854775552_1037 (size=6440) 2024-12-09T03:51:04,442 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T03:51:04,442 INFO [M:0;55d0183f16d2:42121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b834eecd73b242afb798e7b1ad5e3fb3 2024-12-09T03:51:04,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:04,451 INFO [RS:2;55d0183f16d2:42313 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:51:04,451 INFO [RS:2;55d0183f16d2:42313 {}] regionserver.HRegionServer(1031): Exiting; stopping=55d0183f16d2,42313,1733716260475; zookeeper connection closed. 2024-12-09T03:51:04,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42313-0x10195589a9a0003, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:04,451 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@638cf2ba {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@638cf2ba 2024-12-09T03:51:04,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:04,458 INFO [RS:0;55d0183f16d2:36451 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:51:04,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36451-0x10195589a9a0001, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:04,458 INFO [RS:0;55d0183f16d2:36451 {}] regionserver.HRegionServer(1031): Exiting; stopping=55d0183f16d2,36451,1733716260295; zookeeper connection closed. 2024-12-09T03:51:04,458 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3ba3afb5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3ba3afb5 2024-12-09T03:51:04,459 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T03:51:04,468 DEBUG [M:0;55d0183f16d2:42121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d6cc5c7d46bf4430827be1429a1073f3 is 69, key is 55d0183f16d2,36451,1733716260295/rs:state/1733716261635/Put/seqid=0 2024-12-09T03:51:04,470 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T03:51:04,470 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T03:51:04,473 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-212142998_22 at /127.0.0.1:35842 [Receiving block BP-216086345-172.17.0.2-1733716256505:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:34573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35842 dst: /127.0.0.1:34573 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:51:04,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-09T03:51:04,477 WARN [M:0;55d0183f16d2:42121 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T03:51:04,477 INFO [M:0;55d0183f16d2:42121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d6cc5c7d46bf4430827be1429a1073f3 2024-12-09T03:51:04,487 DEBUG [M:0;55d0183f16d2:42121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a35d309cf59c4dd8a1047f9e5c3bcb63 as hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a35d309cf59c4dd8a1047f9e5c3bcb63 2024-12-09T03:51:04,496 INFO [M:0;55d0183f16d2:42121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a35d309cf59c4dd8a1047f9e5c3bcb63, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T03:51:04,498 DEBUG [M:0;55d0183f16d2:42121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b834eecd73b242afb798e7b1ad5e3fb3 as hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b834eecd73b242afb798e7b1ad5e3fb3 2024-12-09T03:51:04,505 INFO [M:0;55d0183f16d2:42121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b834eecd73b242afb798e7b1ad5e3fb3, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T03:51:04,507 DEBUG [M:0;55d0183f16d2:42121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d6cc5c7d46bf4430827be1429a1073f3 as hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d6cc5c7d46bf4430827be1429a1073f3 2024-12-09T03:51:04,515 INFO [M:0;55d0183f16d2:42121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d6cc5c7d46bf4430827be1429a1073f3, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T03:51:04,516 INFO [M:0;55d0183f16d2:42121 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=72, compaction requested=false 2024-12-09T03:51:04,518 INFO [M:0;55d0183f16d2:42121 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:51:04,518 DEBUG [M:0;55d0183f16d2:42121 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733716264379Disabling compacts and flushes for region at 1733716264379Disabling writes for close at 1733716264379Obtaining lock to block concurrent updates at 1733716264379Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733716264379Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733716264380 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733716264381 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733716264381Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733716264398 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733716264398Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733716264415 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733716264431 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733716264431Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733716264449 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733716264467 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733716264468 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c9278ab: reopening flushed file at 1733716264486 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17c3ddea: reopening flushed file at 1733716264496 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@704b7a8f: reopening flushed file at 1733716264506 (+10 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=72, compaction requested=false at 1733716264516 (+10 ms)Writing region close event to WAL at 1733716264518 (+2 ms)Closed at 1733716264518 2024-12-09T03:51:04,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34573 is added to blk_1073741825_1011 (size=32683) 2024-12-09T03:51:04,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741825_1011 (size=32683) 2024-12-09T03:51:04,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39673 is added to blk_1073741825_1011 (size=32683) 2024-12-09T03:51:04,522 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:51:04,523 INFO [M:0;55d0183f16d2:42121 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T03:51:04,523 INFO [M:0;55d0183f16d2:42121 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42121 2024-12-09T03:51:04,523 INFO [M:0;55d0183f16d2:42121 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:51:04,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:04,626 INFO [M:0;55d0183f16d2:42121 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:51:04,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42121-0x10195589a9a0000, quorum=127.0.0.1:53617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:04,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:51:04,632 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:51:04,633 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:51:04,633 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:51:04,633 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir/,STOPPED} 2024-12-09T03:51:04,635 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:51:04,635 WARN [BP-216086345-172.17.0.2-1733716256505 heartbeating to localhost/127.0.0.1:46337 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:51:04,635 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:51:04,635 WARN [BP-216086345-172.17.0.2-1733716256505 heartbeating to localhost/127.0.0.1:46337 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-216086345-172.17.0.2-1733716256505 (Datanode Uuid fea5b7a2-fe98-4716-b8bc-c906225e966c) service to localhost/127.0.0.1:46337 2024-12-09T03:51:04,637 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data5/current/BP-216086345-172.17.0.2-1733716256505 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:04,637 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data6/current/BP-216086345-172.17.0.2-1733716256505 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:04,637 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:51:04,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:51:04,640 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:51:04,640 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:51:04,640 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:51:04,640 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir/,STOPPED} 2024-12-09T03:51:04,641 WARN [BP-216086345-172.17.0.2-1733716256505 heartbeating to localhost/127.0.0.1:46337 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:51:04,641 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:51:04,641 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:51:04,641 WARN [BP-216086345-172.17.0.2-1733716256505 heartbeating to localhost/127.0.0.1:46337 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-216086345-172.17.0.2-1733716256505 (Datanode Uuid 4dd6e86f-32df-42db-aee2-16fb3ec94110) service to localhost/127.0.0.1:46337 2024-12-09T03:51:04,642 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data3/current/BP-216086345-172.17.0.2-1733716256505 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:04,642 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data4/current/BP-216086345-172.17.0.2-1733716256505 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:04,642 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:51:04,645 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:51:04,645 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:51:04,645 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:51:04,645 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:51:04,645 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir/,STOPPED} 2024-12-09T03:51:04,647 WARN [BP-216086345-172.17.0.2-1733716256505 heartbeating to localhost/127.0.0.1:46337 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:51:04,647 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:51:04,647 WARN [BP-216086345-172.17.0.2-1733716256505 heartbeating to localhost/127.0.0.1:46337 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-216086345-172.17.0.2-1733716256505 (Datanode Uuid 01635950-7823-4032-b34f-cf031dfd9b5d) service to localhost/127.0.0.1:46337 2024-12-09T03:51:04,647 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:51:04,647 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data1/current/BP-216086345-172.17.0.2-1733716256505 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:04,648 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/cluster_8c4fc60b-54ae-9778-fc17-9d4c75daf902/data/data2/current/BP-216086345-172.17.0.2-1733716256505 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:04,648 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:51:04,659 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:51:04,660 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:51:04,660 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:51:04,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:51:04,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir/,STOPPED} 2024-12-09T03:51:04,669 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T03:51:04,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T03:51:04,706 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 158), OpenFileDescriptor=441 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=153 (was 122) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=11995 (was 12306) 2024-12-09T03:51:04,712 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=441, MaxFileDescriptor=1048576, SystemLoadAverage=153, ProcessCount=11, AvailableMemoryMB=11995 2024-12-09T03:51:04,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T03:51:04,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.log.dir so I do NOT create it in target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081 2024-12-09T03:51:04,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/66fc5809-e595-1e4c-2fe7-3b3d937c8662/hadoop.tmp.dir so I do NOT create it in target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081 2024-12-09T03:51:04,713 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068, deleteOnExit=true 2024-12-09T03:51:04,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T03:51:04,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/test.cache.data in system properties and HBase conf 2024-12-09T03:51:04,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T03:51:04,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.log.dir in system properties and HBase conf 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T03:51:04,714 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T03:51:04,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:51:04,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:51:04,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T03:51:04,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/nfs.dump.dir in system properties and HBase conf 2024-12-09T03:51:04,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/java.io.tmpdir in system properties and HBase conf 2024-12-09T03:51:04,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:51:04,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T03:51:04,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T03:51:04,804 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:51:04,811 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:51:04,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:51:04,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:51:04,813 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:51:04,815 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:51:04,816 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ad8d9de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:51:04,817 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e58533{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:51:04,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15027254{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/java.io.tmpdir/jetty-localhost-44229-hadoop-hdfs-3_4_1-tests_jar-_-any-4572321775427069826/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:51:04,933 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4293887f{HTTP/1.1, (http/1.1)}{localhost:44229} 2024-12-09T03:51:04,933 INFO [Time-limited test {}] server.Server(415): Started @10396ms 2024-12-09T03:51:05,031 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:51:05,036 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:51:05,036 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:51:05,036 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:51:05,037 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:51:05,037 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c4ebd49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:51:05,037 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72f96008{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:51:05,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18f854cf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/java.io.tmpdir/jetty-localhost-39527-hadoop-hdfs-3_4_1-tests_jar-_-any-4759147901385729175/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:51:05,154 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@70fdfe33{HTTP/1.1, (http/1.1)}{localhost:39527} 2024-12-09T03:51:05,154 INFO [Time-limited test {}] server.Server(415): Started @10617ms 2024-12-09T03:51:05,156 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:51:05,188 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:51:05,191 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:51:05,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:51:05,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:51:05,192 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:51:05,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b4148d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:51:05,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55cf3a01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:51:05,261 WARN [Thread-520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data1/current/BP-2080191298-172.17.0.2-1733716264747/current, will proceed with Du for space computation calculation, 2024-12-09T03:51:05,261 WARN [Thread-521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data2/current/BP-2080191298-172.17.0.2-1733716264747/current, will proceed with Du for space computation calculation, 2024-12-09T03:51:05,281 WARN [Thread-499 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:51:05,284 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x341115f3da5a40d4 with lease ID 0xda3d029f34ff9d43: Processing first storage report for DS-8ffe2ad2-b326-4b88-9959-a3d1c138449d from datanode DatanodeRegistration(127.0.0.1:46383, datanodeUuid=cd1defce-e3d3-4aae-b252-7232425fba6e, infoPort=36817, infoSecurePort=0, ipcPort=32781, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747) 2024-12-09T03:51:05,284 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x341115f3da5a40d4 with lease ID 0xda3d029f34ff9d43: from storage DS-8ffe2ad2-b326-4b88-9959-a3d1c138449d node DatanodeRegistration(127.0.0.1:46383, datanodeUuid=cd1defce-e3d3-4aae-b252-7232425fba6e, infoPort=36817, infoSecurePort=0, ipcPort=32781, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:51:05,284 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x341115f3da5a40d4 with lease ID 0xda3d029f34ff9d43: Processing first storage report for DS-494d8299-ab83-46b7-9505-8d882e0661d2 from datanode DatanodeRegistration(127.0.0.1:46383, datanodeUuid=cd1defce-e3d3-4aae-b252-7232425fba6e, infoPort=36817, infoSecurePort=0, ipcPort=32781, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747) 2024-12-09T03:51:05,284 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x341115f3da5a40d4 with lease ID 0xda3d029f34ff9d43: from storage DS-494d8299-ab83-46b7-9505-8d882e0661d2 node DatanodeRegistration(127.0.0.1:46383, datanodeUuid=cd1defce-e3d3-4aae-b252-7232425fba6e, infoPort=36817, infoSecurePort=0, ipcPort=32781, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:51:05,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46f4cd0a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/java.io.tmpdir/jetty-localhost-36191-hadoop-hdfs-3_4_1-tests_jar-_-any-13530109047500503681/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:51:05,316 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@700b2317{HTTP/1.1, (http/1.1)}{localhost:36191} 2024-12-09T03:51:05,316 INFO [Time-limited test {}] server.Server(415): Started @10779ms 2024-12-09T03:51:05,318 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:51:05,347 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:51:05,350 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:51:05,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:51:05,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:51:05,351 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:51:05,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44968fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:51:05,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40b03519{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:51:05,417 WARN [Thread-555 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data3/current/BP-2080191298-172.17.0.2-1733716264747/current, will proceed with Du for space computation calculation, 2024-12-09T03:51:05,417 WARN [Thread-556 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data4/current/BP-2080191298-172.17.0.2-1733716264747/current, will proceed with Du for space computation calculation, 2024-12-09T03:51:05,435 WARN [Thread-535 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:51:05,438 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x186d5b139ca08426 with lease ID 0xda3d029f34ff9d44: Processing first storage report for DS-86bc53c8-585e-48e2-876f-866c90aae0e2 from datanode DatanodeRegistration(127.0.0.1:44565, datanodeUuid=67edc136-2845-40bd-a2c7-9d1a49595dc0, infoPort=33449, infoSecurePort=0, ipcPort=36007, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747) 2024-12-09T03:51:05,438 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x186d5b139ca08426 with lease ID 0xda3d029f34ff9d44: from storage DS-86bc53c8-585e-48e2-876f-866c90aae0e2 node DatanodeRegistration(127.0.0.1:44565, datanodeUuid=67edc136-2845-40bd-a2c7-9d1a49595dc0, infoPort=33449, infoSecurePort=0, ipcPort=36007, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:51:05,438 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x186d5b139ca08426 with lease ID 0xda3d029f34ff9d44: Processing first storage report for DS-e869d945-1494-40e7-b937-6981f816017d from datanode DatanodeRegistration(127.0.0.1:44565, datanodeUuid=67edc136-2845-40bd-a2c7-9d1a49595dc0, infoPort=33449, infoSecurePort=0, ipcPort=36007, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747) 2024-12-09T03:51:05,438 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x186d5b139ca08426 with lease ID 0xda3d029f34ff9d44: from storage DS-e869d945-1494-40e7-b937-6981f816017d node DatanodeRegistration(127.0.0.1:44565, datanodeUuid=67edc136-2845-40bd-a2c7-9d1a49595dc0, infoPort=33449, infoSecurePort=0, ipcPort=36007, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:51:05,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f50f857{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/java.io.tmpdir/jetty-localhost-40019-hadoop-hdfs-3_4_1-tests_jar-_-any-4039986794067937581/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:51:05,470 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7eeef71e{HTTP/1.1, (http/1.1)}{localhost:40019} 2024-12-09T03:51:05,470 INFO [Time-limited test {}] server.Server(415): Started @10933ms 2024-12-09T03:51:05,471 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
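The restart in this stretch begins with the earlier "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ..., numDataNodes=3, ...}" line and ends with the data nodes above registering their storages. As a hedged sketch, the same shape can be requested explicitly through the option builder; the builder method names below are assumed to match the fields printed in that toString.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class StartOptionSketch {
  public static void main(String[] args) throws Exception {
    // Mirrors the logged option: 1 master, 3 region servers, 3 data nodes, 1 ZK server,
    // and no pre-created root or WAL directories.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();

    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster(option);   // emits the "STARTING DFS" and Jetty lines above
    try {
      // test code would run here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}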
2024-12-09T03:51:05,573 WARN [Thread-581 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data5/current/BP-2080191298-172.17.0.2-1733716264747/current, will proceed with Du for space computation calculation, 2024-12-09T03:51:05,573 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data6/current/BP-2080191298-172.17.0.2-1733716264747/current, will proceed with Du for space computation calculation, 2024-12-09T03:51:05,599 WARN [Thread-570 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:51:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ca01b23528ba295 with lease ID 0xda3d029f34ff9d45: Processing first storage report for DS-1d735524-52c1-4ad2-9ea2-321fbf80ff23 from datanode DatanodeRegistration(127.0.0.1:37799, datanodeUuid=61eecd26-d523-4d59-b7ab-20f29198f80f, infoPort=46091, infoSecurePort=0, ipcPort=43341, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747) 2024-12-09T03:51:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ca01b23528ba295 with lease ID 0xda3d029f34ff9d45: from storage DS-1d735524-52c1-4ad2-9ea2-321fbf80ff23 node DatanodeRegistration(127.0.0.1:37799, datanodeUuid=61eecd26-d523-4d59-b7ab-20f29198f80f, infoPort=46091, infoSecurePort=0, ipcPort=43341, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:51:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ca01b23528ba295 with lease ID 0xda3d029f34ff9d45: Processing first storage report for DS-84d408b4-bfdc-4fb9-90ea-77db3df48677 from datanode DatanodeRegistration(127.0.0.1:37799, datanodeUuid=61eecd26-d523-4d59-b7ab-20f29198f80f, infoPort=46091, infoSecurePort=0, ipcPort=43341, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747) 2024-12-09T03:51:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ca01b23528ba295 with lease ID 0xda3d029f34ff9d45: from storage DS-84d408b4-bfdc-4fb9-90ea-77db3df48677 node DatanodeRegistration(127.0.0.1:37799, datanodeUuid=61eecd26-d523-4d59-b7ab-20f29198f80f, infoPort=46091, infoSecurePort=0, ipcPort=43341, storageInfo=lv=-57;cid=testClusterID;nsid=201962637;c=1733716264747), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:51:05,699 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081 2024-12-09T03:51:05,702 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/zookeeper_0, clientPort=63236, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T03:51:05,702 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63236 2024-12-09T03:51:05,703 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,705 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:51:05,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:51:05,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:51:05,719 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569 with version=8 2024-12-09T03:51:05,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46337/user/jenkins/test-data/bc17d4c5-83c1-88c4-861c-d25c75c9ba68/hbase-staging 2024-12-09T03:51:05,721 INFO [Time-limited test {}] client.ConnectionUtils(128): master/55d0183f16d2:0 server-side Connection retries=45 2024-12-09T03:51:05,721 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,721 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,722 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:51:05,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:51:05,722 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T03:51:05,722 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:51:05,723 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33217 2024-12-09T03:51:05,724 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33217 connecting to ZooKeeper ensemble=127.0.0.1:63236 2024-12-09T03:51:05,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:332170x0, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:51:05,731 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33217-0x1019558b5750000 connected 2024-12-09T03:51:05,745 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,747 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,749 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:05,749 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569, hbase.cluster.distributed=false 2024-12-09T03:51:05,751 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:51:05,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33217 2024-12-09T03:51:05,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33217 2024-12-09T03:51:05,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33217 2024-12-09T03:51:05,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33217 2024-12-09T03:51:05,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33217 2024-12-09T03:51:05,768 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/55d0183f16d2:0 server-side Connection retries=45 2024-12-09T03:51:05,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,768 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:51:05,768 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:51:05,768 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:51:05,768 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:51:05,769 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37651 2024-12-09T03:51:05,770 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37651 connecting to ZooKeeper ensemble=127.0.0.1:63236 2024-12-09T03:51:05,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,773 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376510x0, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:51:05,779 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:376510x0, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:05,779 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37651-0x1019558b5750001 connected 2024-12-09T03:51:05,779 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:51:05,780 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:51:05,780 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:51:05,782 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:51:05,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37651 2024-12-09T03:51:05,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37651 2024-12-09T03:51:05,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37651 2024-12-09T03:51:05,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37651 2024-12-09T03:51:05,783 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37651 2024-12-09T03:51:05,798 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/55d0183f16d2:0 server-side Connection retries=45 2024-12-09T03:51:05,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,798 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:51:05,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:51:05,798 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:51:05,798 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:51:05,799 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45049 2024-12-09T03:51:05,800 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45049 connecting to ZooKeeper ensemble=127.0.0.1:63236 2024-12-09T03:51:05,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,802 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:450490x0, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:51:05,807 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:05,807 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45049-0x1019558b5750002 connected 2024-12-09T03:51:05,807 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:51:05,808 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:51:05,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
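Every "connecting to ZooKeeper ensemble=127.0.0.1:63236" line above is a server process registering with the MiniZooKeeperCluster that was started on an ephemeral client port (63236 in this run). Test code reaches the same cluster through the ordinary client API; the sketch below is illustrative and takes its quorum settings from a Configuration (normally HBaseTestingUtil#getConfiguration()) rather than hard-coding that port.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  // 'conf' already carries hbase.zookeeper.quorum and the ephemeral clientPort
  // chosen when the mini ZooKeeper cluster started.
  static void checkCluster(Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A trivial round trip through the master RPC server bound above.
      boolean metaOnline = admin.tableExists(TableName.META_TABLE_NAME);
      System.out.println("hbase:meta present: " + metaOnline);
    }
  }
}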
2024-12-09T03:51:05,810 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:51:05,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45049 2024-12-09T03:51:05,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45049 2024-12-09T03:51:05,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45049 2024-12-09T03:51:05,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45049 2024-12-09T03:51:05,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45049 2024-12-09T03:51:05,826 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/55d0183f16d2:0 server-side Connection retries=45 2024-12-09T03:51:05,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,826 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:51:05,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:51:05,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:51:05,826 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:51:05,827 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:51:05,827 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41463 2024-12-09T03:51:05,829 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41463 connecting to ZooKeeper ensemble=127.0.0.1:63236 2024-12-09T03:51:05,829 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,831 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:414630x0, quorum=127.0.0.1:63236, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:51:05,835 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41463-0x1019558b5750003 connected 2024-12-09T03:51:05,835 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:05,836 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:51:05,836 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:51:05,837 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:51:05,838 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:51:05,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41463 2024-12-09T03:51:05,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41463 2024-12-09T03:51:05,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41463 2024-12-09T03:51:05,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41463 2024-12-09T03:51:05,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41463 2024-12-09T03:51:05,851 DEBUG [M:0;55d0183f16d2:33217 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;55d0183f16d2:33217 2024-12-09T03:51:05,852 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/55d0183f16d2,33217,1733716265721 2024-12-09T03:51:05,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:05,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:05,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:05,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:05,854 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33217-0x1019558b5750000, 
quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/55d0183f16d2,33217,1733716265721 2024-12-09T03:51:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:51:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:51:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:51:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:05,858 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:51:05,858 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/55d0183f16d2,33217,1733716265721 from backup master directory 2024-12-09T03:51:05,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/55d0183f16d2,33217,1733716265721 2024-12-09T03:51:05,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:05,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:05,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:05,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, 
quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:51:05,862 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:51:05,862 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=55d0183f16d2,33217,1733716265721 2024-12-09T03:51:05,869 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/hbase.id] with ID: 45c67599-91fe-4dca-a80c-c08c9d455854 2024-12-09T03:51:05,869 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/.tmp/hbase.id 2024-12-09T03:51:05,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:51:05,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:51:05,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:51:05,879 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/.tmp/hbase.id]:[hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/hbase.id] 2024-12-09T03:51:05,895 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:51:05,895 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T03:51:05,897 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
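The watcher events above show the master first adding a /hbase/backup-masters entry, then deleting it and creating /hbase/master once it becomes active. For illustration only, the presence of that znode can be checked with the plain ZooKeeper client; the connect string and timeout below are assumptions, and the znode payload is left unparsed because it is a protobuf-encoded server name rather than plain text.

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ActiveMasterZNodeSketch {
  public static void main(String[] args) throws Exception {
    // 63236 is the ephemeral port this particular run happened to pick; pass the
    // real ensemble as the first argument when trying this against another run.
    String ensemble = args.length > 0 ? args[0] : "127.0.0.1:63236";
    ZooKeeper zk = new ZooKeeper(ensemble, 30_000, event -> { });
    try {
      // /hbase/master is the znode created by the active master above.
      Stat stat = zk.exists("/hbase/master", false);
      System.out.println("/hbase/master present: " + (stat != null));
    } finally {
      zk.close();
    }
  }
}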
2024-12-09T03:51:05,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:05,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:05,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:05,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:05,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:51:05,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:51:05,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:51:05,911 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:51:05,912 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T03:51:05,913 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:51:05,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is 
added to blk_1073741828_1004 (size=1189) 2024-12-09T03:51:05,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:51:05,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:51:05,927 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store 2024-12-09T03:51:05,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:51:05,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:51:05,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:51:05,938 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:05,938 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:51:05,938 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:05,938 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:51:05,938 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:51:05,938 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:05,938 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:05,938 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733716265938Disabling compacts and flushes for region at 1733716265938Disabling writes for close at 1733716265938Writing region close event to WAL at 1733716265938Closed at 1733716265938 2024-12-09T03:51:05,940 WARN [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/.initializing 2024-12-09T03:51:05,940 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/WALs/55d0183f16d2,33217,1733716265721 2024-12-09T03:51:05,944 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C33217%2C1733716265721, suffix=, logDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/WALs/55d0183f16d2,33217,1733716265721, archiveDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/oldWALs, maxLogs=10 2024-12-09T03:51:05,945 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 55d0183f16d2%2C33217%2C1733716265721.1733716265944 2024-12-09T03:51:05,955 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/WALs/55d0183f16d2,33217,1733716265721/55d0183f16d2%2C33217%2C1733716265721.1733716265944 2024-12-09T03:51:05,958 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33449:33449),(127.0.0.1/127.0.0.1:46091:46091),(127.0.0.1/127.0.0.1:36817:36817)] 2024-12-09T03:51:05,960 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:51:05,960 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:05,960 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,960 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T03:51:05,964 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:05,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:05,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T03:51:05,966 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:05,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:51:05,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T03:51:05,969 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:05,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:51:05,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T03:51:05,971 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:05,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:51:05,972 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,972 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,973 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,974 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,974 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,975 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:51:05,976 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:51:05,979 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:51:05,980 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62014888, jitterRate=-0.07590615749359131}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:51:05,980 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733716265960Initializing all the Stores at 1733716265962 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716265962Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716265962Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716265962Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716265962Cleaning up temporary data from old regions at 1733716265974 (+12 ms)Region opened successfully at 1733716265980 (+6 ms) 2024-12-09T03:51:05,981 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T03:51:05,985 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab34d99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=55d0183f16d2/172.17.0.2:0 2024-12-09T03:51:05,986 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T03:51:05,986 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T03:51:05,986 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T03:51:05,987 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T03:51:05,987 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T03:51:05,988 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T03:51:05,988 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T03:51:05,990 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
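[Editor's note] The RemoteProcedureDispatcher line above reports coreThreads=3 with allowCoreThreadTimeOut=true and queueMaxSize=32. The following is a small sketch of a thread pool with that shape using the standard java.util.concurrent API; only the three values quoted from the log are taken from the source, the rest (class name, idle timeout) is assumed for illustration.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class DispatcherPool {
      /** Pool shaped like the dispatcher log line: 3 core threads that may
       *  time out when idle, and a bounded work queue of 32 entries. */
      public static ThreadPoolExecutor create() {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3,                              // coreThreads=3 (from the log line)
            3,                              // max size kept equal to core size
            60L, TimeUnit.SECONDS,          // idle timeout for core threads (assumed value)
            new LinkedBlockingQueue<>(32)); // queueMaxSize=32 (from the log line)
        pool.allowCoreThreadTimeOut(true);  // allowCoreThreadTimeOut=true (from the log line)
        return pool;
      }
    }

Allowing core threads to time out lets the pool shrink to zero threads when no procedures are dispatched, which suits an executor that is mostly idle between cluster events.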
2024-12-09T03:51:05,991 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T03:51:05,992 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T03:51:05,993 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T03:51:05,993 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T03:51:05,996 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T03:51:05,997 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T03:51:05,998 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T03:51:05,999 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T03:51:06,000 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T03:51:06,001 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T03:51:06,004 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T03:51:06,005 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T03:51:06,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:06,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:06,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:06,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T03:51:06,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:06,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,009 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=55d0183f16d2,33217,1733716265721, sessionid=0x1019558b5750000, setting cluster-up flag (Was=false) 2024-12-09T03:51:06,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,018 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T03:51:06,019 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=55d0183f16d2,33217,1733716265721 2024-12-09T03:51:06,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,028 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T03:51:06,029 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=55d0183f16d2,33217,1733716265721 2024-12-09T03:51:06,031 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T03:51:06,034 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T03:51:06,034 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T03:51:06,035 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
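[Editor's note] The StochasticLoadBalancer line above lists its cost functions and reports "sum of multiplier of cost functions = 0.0". As a hedged illustration of the general weighted-cost idea (not HBase's actual balancer code), combining per-function costs looks like the sketch below; names and types are hypothetical.

    import java.util.List;
    import java.util.function.DoubleSupplier;

    public class WeightedCost {
      /** Illustrative weighted sum of per-function costs; if every multiplier
       *  is 0.0, as the log line above reports, the combined cost is zero
       *  regardless of the individual cost values. */
      public static double totalCost(List<DoubleSupplier> costFunctions, double[] multipliers) {
        double total = 0.0;
        for (int i = 0; i < costFunctions.size(); i++) {
          total += multipliers[i] * costFunctions.get(i).getAsDouble();
        }
        return total;
      }
    }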
2024-12-09T03:51:06,035 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 55d0183f16d2,33217,1733716265721 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T03:51:06,036 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/55d0183f16d2:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:51:06,037 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/55d0183f16d2:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:51:06,037 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/55d0183f16d2:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:51:06,037 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/55d0183f16d2:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:51:06,037 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/55d0183f16d2:0, corePoolSize=10, maxPoolSize=10 2024-12-09T03:51:06,037 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,037 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/55d0183f16d2:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:51:06,037 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,038 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733716296038 2024-12-09T03:51:06,038 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T03:51:06,038 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T03:51:06,038 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T03:51:06,038 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T03:51:06,038 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T03:51:06,038 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T03:51:06,039 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,039 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T03:51:06,039 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T03:51:06,039 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T03:51:06,039 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T03:51:06,039 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T03:51:06,039 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:51:06,040 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T03:51:06,040 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.large.0-1733716266039,5,FailOnTimeoutGroup] 2024-12-09T03:51:06,040 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.small.0-1733716266040,5,FailOnTimeoutGroup] 2024-12-09T03:51:06,040 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,040 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T03:51:06,040 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,040 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
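[Editor's note] The ChoreService lines above enable periodic chores with fixed millisecond periods (LogsCleaner and HFileCleaner at 600000 ms, ReplicationBarrierCleaner at 43200000 ms, SnapshotCleaner at 1800000 ms). A minimal sketch of the same scheduling idea with the standard ScheduledExecutorService follows; it is not HBase's ChoreService, and the task body is a placeholder.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreScheduler {
      /** Schedule a periodic cleanup task, similar in spirit to the chores
       *  the log reports as enabled with millisecond periods. */
      public static void scheduleLogsCleaner(ScheduledExecutorService pool, Runnable cleaner) {
        // period=600000 ms, taken from the LogsCleaner chore line in the log
        pool.scheduleAtFixedRate(cleaner, 0L, 600_000L, TimeUnit.MILLISECONDS);
      }

      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        scheduleLogsCleaner(pool, () -> System.out.println("cleaning old WALs..."));
      }
    }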
2024-12-09T03:51:06,041 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,041 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T03:51:06,045 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(746): ClusterId : 45c67599-91fe-4dca-a80c-c08c9d455854 2024-12-09T03:51:06,045 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(746): ClusterId : 45c67599-91fe-4dca-a80c-c08c9d455854 2024-12-09T03:51:06,045 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:51:06,045 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(746): ClusterId : 45c67599-91fe-4dca-a80c-c08c9d455854 2024-12-09T03:51:06,045 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:51:06,045 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:51:06,049 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:51:06,049 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:51:06,049 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:51:06,049 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:51:06,049 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:51:06,049 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.RegionServerProcedureManagerHost(43): Procedure 
online-snapshot initializing 2024-12-09T03:51:06,052 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:51:06,052 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:51:06,052 DEBUG [RS:0;55d0183f16d2:37651 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b16eb61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=55d0183f16d2/172.17.0.2:0 2024-12-09T03:51:06,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:51:06,053 DEBUG [RS:2;55d0183f16d2:41463 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50e02e91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=55d0183f16d2/172.17.0.2:0 2024-12-09T03:51:06,053 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:51:06,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:51:06,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:51:06,053 DEBUG [RS:1;55d0183f16d2:45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319dda69, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=55d0183f16d2/172.17.0.2:0 2024-12-09T03:51:06,055 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T03:51:06,055 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569 2024-12-09T03:51:06,066 DEBUG [RS:2;55d0183f16d2:41463 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;55d0183f16d2:41463 2024-12-09T03:51:06,066 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:51:06,066 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:51:06,066 DEBUG [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T03:51:06,067 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(2659): reportForDuty to master=55d0183f16d2,33217,1733716265721 with port=41463, startcode=1733716265826 2024-12-09T03:51:06,068 DEBUG [RS:2;55d0183f16d2:41463 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:51:06,069 DEBUG [RS:0;55d0183f16d2:37651 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;55d0183f16d2:37651 2024-12-09T03:51:06,069 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:51:06,069 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:51:06,069 DEBUG [RS:1;55d0183f16d2:45049 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;55d0183f16d2:45049 2024-12-09T03:51:06,069 DEBUG [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T03:51:06,069 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:51:06,069 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:51:06,069 DEBUG [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T03:51:06,070 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(2659): reportForDuty to master=55d0183f16d2,33217,1733716265721 with port=37651, startcode=1733716265767 2024-12-09T03:51:06,070 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(2659): reportForDuty to master=55d0183f16d2,33217,1733716265721 with port=45049, startcode=1733716265798 2024-12-09T03:51:06,070 DEBUG [RS:0;55d0183f16d2:37651 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:51:06,070 DEBUG [RS:1;55d0183f16d2:45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:51:06,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:51:06,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:51:06,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:51:06,072 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42491, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:51:06,073 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33217 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 55d0183f16d2,45049,1733716265798 2024-12-09T03:51:06,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:06,073 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33217 {}] master.ServerManager(517): Registering regionserver=55d0183f16d2,45049,1733716265798 2024-12-09T03:51:06,073 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60855, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:51:06,074 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42135, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:51:06,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:51:06,075 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33217 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 55d0183f16d2,37651,1733716265767 2024-12-09T03:51:06,076 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33217 {}] master.ServerManager(517): Registering regionserver=55d0183f16d2,37651,1733716265767 2024-12-09T03:51:06,076 DEBUG [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569 2024-12-09T03:51:06,076 DEBUG [RS:1;55d0183f16d2:45049 {}] 
regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33743 2024-12-09T03:51:06,076 DEBUG [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:51:06,077 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:51:06,077 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,077 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33217 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 55d0183f16d2,41463,1733716265826 2024-12-09T03:51:06,077 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33217 {}] master.ServerManager(517): Registering regionserver=55d0183f16d2,41463,1733716265826 2024-12-09T03:51:06,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:06,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:51:06,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:51:06,079 DEBUG [RS:1;55d0183f16d2:45049 {}] zookeeper.ZKUtil(111): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/55d0183f16d2,45049,1733716265798 2024-12-09T03:51:06,079 WARN [RS:1;55d0183f16d2:45049 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
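[Editor's note] The records above show a region server registering under /hbase/rs and setting a watcher on its own znode, while the master's RegionServerTracker reacts to the resulting NodeChildrenChanged events. Below is a sketch of the underlying mechanism using the plain ZooKeeper client API rather than HBase's ZKUtil/ZKWatcher classes; the class and method names are illustrative.

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsZNodeExample {
      /** Create an ephemeral znode for a server under /hbase/rs and leave a
       *  watch on it. The ephemeral node disappears automatically if the
       *  server's ZooKeeper session dies, which is what lets the master
       *  detect crashed region servers. */
      public static void register(ZooKeeper zk, String serverName) throws Exception {
        String path = "/hbase/rs/" + serverName; // e.g. "55d0183f16d2,45049,1733716265798"
        zk.create(path, serverName.getBytes(StandardCharsets.UTF_8),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        zk.exists(path, true); // set a watch so NodeDeleted events are delivered
      }
    }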
2024-12-09T03:51:06,079 INFO [RS:1;55d0183f16d2:45049 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:51:06,079 DEBUG [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,45049,1733716265798 2024-12-09T03:51:06,080 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:51:06,080 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [55d0183f16d2,45049,1733716265798] 2024-12-09T03:51:06,080 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,080 DEBUG [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569 2024-12-09T03:51:06,081 DEBUG [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33743 2024-12-09T03:51:06,081 DEBUG [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:51:06,081 DEBUG [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569 2024-12-09T03:51:06,081 DEBUG [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33743 2024-12-09T03:51:06,081 DEBUG [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:51:06,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:06,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:51:06,083 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 
6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:51:06,083 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:06,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:51:06,086 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:51:06,086 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:51:06,087 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:06,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:51:06,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740 2024-12-09T03:51:06,088 DEBUG [RS:2;55d0183f16d2:41463 {}] zookeeper.ZKUtil(111): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/55d0183f16d2,41463,1733716265826 2024-12-09T03:51:06,088 WARN [RS:2;55d0183f16d2:41463 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
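Editor's note: the CompactionConfiguration(183) dumps above (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0) echo the standard store-compaction settings. A minimal sketch, assuming the usual hbase-default.xml key names (verify against your release); nothing below is taken from this test run, it only mirrors the logged values through the ordinary Hadoop Configuration API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}

Raising the max-files bound combines more files per pass (fewer, larger compactions); the ratio controls how aggressively smaller files are pulled into a minor compaction.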
2024-12-09T03:51:06,088 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [55d0183f16d2,37651,1733716265767] 2024-12-09T03:51:06,088 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [55d0183f16d2,41463,1733716265826] 2024-12-09T03:51:06,088 INFO [RS:2;55d0183f16d2:41463 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:51:06,088 DEBUG [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,41463,1733716265826 2024-12-09T03:51:06,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740 2024-12-09T03:51:06,089 DEBUG [RS:0;55d0183f16d2:37651 {}] zookeeper.ZKUtil(111): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/55d0183f16d2,37651,1733716265767 2024-12-09T03:51:06,089 WARN [RS:0;55d0183f16d2:37651 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:51:06,089 INFO [RS:0;55d0183f16d2:37651 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:51:06,090 DEBUG [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,37651,1733716265767 2024-12-09T03:51:06,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:51:06,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:51:06,091 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:51:06,093 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:51:06,093 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:51:06,095 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:51:06,095 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:51:06,096 INFO [RS:1;55d0183f16d2:45049 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:51:06,096 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
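Editor's note: two settings surface repeatedly in the block above, the WAL provider chosen by WALFactory and the per-column-family flush bound that FlushLargeStoresPolicy reports as unset for hbase:meta. A minimal sketch of how both would be set explicitly; "filesystem" is the provider value that selects the FSHLog-based WAL, and the lower-bound key is the one named verbatim in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalAndFlushSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" -> FSHLogProvider, as instantiated above; "asyncfs" would
    // select the async WAL implementation instead.
    conf.set("hbase.wal.provider", "filesystem");
    // When this key is unset, HBase falls back to memstore flush size divided
    // by the number of families, which is the 32.0 M the log reports.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound",
        32L * 1024 * 1024);
    System.out.println(conf.get("hbase.wal.provider"));
  }
}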
2024-12-09T03:51:06,097 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:51:06,097 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:51:06,097 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66415079, jitterRate=-0.010338202118873596}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:51:06,098 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733716266073Initializing all the Stores at 1733716266074 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716266075 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716266075Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716266075Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716266075Cleaning up temporary data from old regions at 1733716266091 (+16 ms)Region opened successfully at 1733716266098 (+7 ms) 2024-12-09T03:51:06,098 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:51:06,098 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:51:06,098 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:51:06,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:51:06,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:51:06,099 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:51:06,099 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, 
Offheap=false 2024-12-09T03:51:06,100 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:51:06,100 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:51:06,100 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733716266098Disabling compacts and flushes for region at 1733716266098Disabling writes for close at 1733716266099 (+1 ms)Writing region close event to WAL at 1733716266100 (+1 ms)Closed at 1733716266100 2024-12-09T03:51:06,100 INFO [RS:0;55d0183f16d2:37651 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:51:06,100 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,100 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:51:06,100 INFO [RS:2;55d0183f16d2:41463 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:51:06,100 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,100 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
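Editor's note: the MemStoreFlusher line reports an 880 M global memstore limit with an 836 M low-water mark, which is exactly the default 0.95 lower-limit fraction (880 x 0.95 = 836); the throughput controller reports 100 MB/s upper and 50 MB/s lower bounds retuned every 60 s. A hedged sketch of the corresponding keys; the memstore keys are standard, the throughput key names are as I recall them from hbase-default.xml and should be verified for your version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndThroughput {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fractions of the region server heap; with this test JVM's heap, 0.4 of
    // heap is the 880 M mark and 0.95 of that is the 836 M low-water mark.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Compaction throughput bounds in bytes/second (key names assumed).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
    System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
  }
}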
2024-12-09T03:51:06,100 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/55d0183f16d2:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,101 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,102 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,102 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:06,102 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:51:06,102 DEBUG [RS:1;55d0183f16d2:45049 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:06,102 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T03:51:06,102 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T03:51:06,104 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:51:06,104 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:51:06,105 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:51:06,105 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:51:06,105 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:51:06,105 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,106 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T03:51:06,106 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/55d0183f16d2:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:2;55d0183f16d2:41463 {}] 
executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/55d0183f16d2:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,106 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,107 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,107 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,107 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:06,107 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,107 DEBUG [RS:0;55d0183f16d2:37651 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:06,107 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,107 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,107 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/55d0183f16d2:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:51:06,107 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:06,107 DEBUG [RS:2;55d0183f16d2:41463 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:51:06,112 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
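Editor's note: each "Starting executor service name=RS_*, corePoolSize=N, maxPoolSize=N" line above is a small, named, fixed-size thread pool dedicated to one event type. The sketch below is plain java.util.concurrent as an analogy to that pattern, not the HBase ExecutorService class itself:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedPools {
  // One fixed-size, named pool per event type, mirroring core==max above.
  static ExecutorService pool(String name, int threads) {
    AtomicInteger idx = new AtomicInteger();
    ThreadFactory tf = r -> new Thread(r, name + "-" + idx.getAndIncrement());
    return Executors.newFixedThreadPool(threads, tf);
  }

  public static void main(String[] args) {
    ExecutorService openRegion = pool("RS_OPEN_REGION", 1);
    ExecutorService logReplay  = pool("RS_LOG_REPLAY_OPS", 2);
    openRegion.submit(() -> System.out.println(Thread.currentThread().getName()));
    openRegion.shutdown();
    logReplay.shutdown();
  }
}

Keeping open/close/replay work in separate single- or dual-thread pools is what lets one slow operation type queue up without starving the others.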
2024-12-09T03:51:06,112 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,45049,1733716265798-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,112 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,41463,1733716265826-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:51:06,113 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,113 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,113 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,113 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,113 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,113 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,37651,1733716265767-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:51:06,127 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:51:06,128 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,41463,1733716265826-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,128 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
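Editor's note: the ChoreService lines above register periodic background tasks (CompactionChecker and MemstoreFlusherChore every second, ExecutorStatusChore every minute, and so on). As an analogy only, the same scheduling shape in plain Java looks like this; it is not the ScheduledChore API:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
    // Analogous to "ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS"
    chores.scheduleAtFixedRate(
        () -> System.out.println("compaction check"), 0, 1000, TimeUnit.MILLISECONDS);
    Thread.sleep(3_000);
    chores.shutdown();
  }
}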
2024-12-09T03:51:06,128 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.Replication(171): 55d0183f16d2,41463,1733716265826 started 2024-12-09T03:51:06,128 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:51:06,129 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,37651,1733716265767-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,129 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,129 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.Replication(171): 55d0183f16d2,37651,1733716265767 started 2024-12-09T03:51:06,129 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:51:06,129 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,45049,1733716265798-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,129 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,129 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.Replication(171): 55d0183f16d2,45049,1733716265798 started 2024-12-09T03:51:06,142 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,142 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(1482): Serving as 55d0183f16d2,41463,1733716265826, RpcServer on 55d0183f16d2/172.17.0.2:41463, sessionid=0x1019558b5750003 2024-12-09T03:51:06,142 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:51:06,142 DEBUG [RS:2;55d0183f16d2:41463 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 55d0183f16d2,41463,1733716265826 2024-12-09T03:51:06,142 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,41463,1733716265826' 2024-12-09T03:51:06,142 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:51:06,143 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:51:06,143 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,143 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T03:51:06,143 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(1482): Serving as 55d0183f16d2,45049,1733716265798, RpcServer on 55d0183f16d2/172.17.0.2:45049, sessionid=0x1019558b5750002 2024-12-09T03:51:06,143 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(1482): Serving as 55d0183f16d2,37651,1733716265767, RpcServer on 55d0183f16d2/172.17.0.2:37651, sessionid=0x1019558b5750001 2024-12-09T03:51:06,143 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:51:06,143 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:51:06,143 DEBUG [RS:1;55d0183f16d2:45049 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 55d0183f16d2,45049,1733716265798 2024-12-09T03:51:06,143 DEBUG [RS:0;55d0183f16d2:37651 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 55d0183f16d2,37651,1733716265767 2024-12-09T03:51:06,143 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,45049,1733716265798' 2024-12-09T03:51:06,143 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,37651,1733716265767' 2024-12-09T03:51:06,143 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:51:06,143 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:51:06,144 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:51:06,144 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:51:06,144 DEBUG [RS:2;55d0183f16d2:41463 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 55d0183f16d2,41463,1733716265826 2024-12-09T03:51:06,144 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,41463,1733716265826' 2024-12-09T03:51:06,144 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:51:06,144 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:51:06,144 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:51:06,144 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:51:06,144 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:51:06,144 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:51:06,145 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:51:06,145 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.RegionServerProcedureManagerHost(51): 
Procedure online-snapshot starting 2024-12-09T03:51:06,145 DEBUG [RS:0;55d0183f16d2:37651 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 55d0183f16d2,37651,1733716265767 2024-12-09T03:51:06,145 DEBUG [RS:1;55d0183f16d2:45049 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 55d0183f16d2,45049,1733716265798 2024-12-09T03:51:06,145 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,37651,1733716265767' 2024-12-09T03:51:06,145 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:51:06,145 DEBUG [RS:2;55d0183f16d2:41463 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:51:06,145 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '55d0183f16d2,45049,1733716265798' 2024-12-09T03:51:06,145 INFO [RS:2;55d0183f16d2:41463 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:51:06,145 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:51:06,145 INFO [RS:2;55d0183f16d2:41463 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:51:06,145 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:51:06,145 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:51:06,146 DEBUG [RS:0;55d0183f16d2:37651 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:51:06,146 INFO [RS:0;55d0183f16d2:37651 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:51:06,146 DEBUG [RS:1;55d0183f16d2:45049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:51:06,146 INFO [RS:0;55d0183f16d2:37651 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:51:06,146 INFO [RS:1;55d0183f16d2:45049 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:51:06,146 INFO [RS:1;55d0183f16d2:45049 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
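Editor's note: each region server above registers as a procedure member and then watches the /hbase/flush-table-proc/acquired and /hbase/online-snapshot/acquired znodes for work. A minimal sketch of that watch pattern using the plain ZooKeeper client (quorum address taken from the log lines; this is the generic API, not HBase's internal ZKProcedureMemberRpcs):

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AcquiredNodeWatcher {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent e) ->
        System.out.println("event " + e.getType() + " on " + e.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63236", 30_000, watcher);
    // Same idea as "Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'":
    // list current children and leave a watch so NodeChildrenChanged fires on new procedures.
    List<String> pending = zk.getChildren("/hbase/flush-table-proc/acquired", true);
    System.out.println("pending procedures: " + pending);
    zk.close();
  }
}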
2024-12-09T03:51:06,248 INFO [RS:2;55d0183f16d2:41463 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C41463%2C1733716265826, suffix=, logDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,41463,1733716265826, archiveDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/oldWALs, maxLogs=32 2024-12-09T03:51:06,248 INFO [RS:1;55d0183f16d2:45049 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C45049%2C1733716265798, suffix=, logDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,45049,1733716265798, archiveDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/oldWALs, maxLogs=32 2024-12-09T03:51:06,248 INFO [RS:0;55d0183f16d2:37651 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C37651%2C1733716265767, suffix=, logDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,37651,1733716265767, archiveDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/oldWALs, maxLogs=32 2024-12-09T03:51:06,250 INFO [RS:2;55d0183f16d2:41463 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 55d0183f16d2%2C41463%2C1733716265826.1733716266249 2024-12-09T03:51:06,251 INFO [RS:1;55d0183f16d2:45049 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 55d0183f16d2%2C45049%2C1733716265798.1733716266250 2024-12-09T03:51:06,251 INFO [RS:0;55d0183f16d2:37651 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 55d0183f16d2%2C37651%2C1733716265767.1733716266251 2024-12-09T03:51:06,256 WARN [55d0183f16d2:33217 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
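Editor's note: the AbstractFSWAL(613) lines report blocksize=256 MB, rollsize=128 MB and maxLogs=32; the roll size is the block size times the roll multiplier (256 MB x 0.5 = 128 MB). A hedged sketch of the keys that drive those numbers, with names as I recall them from hbase-default.xml (verify for your release):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSizing {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at blocksize * 0.5
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs
    System.out.println(conf.getInt("hbase.regionserver.maxlogs", -1));
  }
}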
2024-12-09T03:51:06,259 INFO [RS:2;55d0183f16d2:41463 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,41463,1733716265826/55d0183f16d2%2C41463%2C1733716265826.1733716266249 2024-12-09T03:51:06,260 DEBUG [RS:2;55d0183f16d2:41463 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33449:33449),(127.0.0.1/127.0.0.1:46091:46091),(127.0.0.1/127.0.0.1:36817:36817)] 2024-12-09T03:51:06,261 INFO [RS:1;55d0183f16d2:45049 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,45049,1733716265798/55d0183f16d2%2C45049%2C1733716265798.1733716266250 2024-12-09T03:51:06,261 INFO [RS:0;55d0183f16d2:37651 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,37651,1733716265767/55d0183f16d2%2C37651%2C1733716265767.1733716266251 2024-12-09T03:51:06,265 DEBUG [RS:1;55d0183f16d2:45049 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36817:36817),(127.0.0.1/127.0.0.1:46091:46091),(127.0.0.1/127.0.0.1:33449:33449)] 2024-12-09T03:51:06,265 DEBUG [RS:0;55d0183f16d2:37651 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33449:33449),(127.0.0.1/127.0.0.1:36817:36817),(127.0.0.1/127.0.0.1:46091:46091)] 2024-12-09T03:51:06,506 DEBUG [55d0183f16d2:33217 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T03:51:06,507 DEBUG [55d0183f16d2:33217 {}] balancer.BalancerClusterState(204): Hosts are {55d0183f16d2=0} racks are {/default-rack=0} 2024-12-09T03:51:06,510 DEBUG [55d0183f16d2:33217 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T03:51:06,510 DEBUG [55d0183f16d2:33217 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T03:51:06,510 DEBUG [55d0183f16d2:33217 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T03:51:06,510 DEBUG [55d0183f16d2:33217 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T03:51:06,510 DEBUG [55d0183f16d2:33217 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T03:51:06,510 DEBUG [55d0183f16d2:33217 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T03:51:06,510 INFO [55d0183f16d2:33217 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T03:51:06,510 INFO [55d0183f16d2:33217 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T03:51:06,510 INFO [55d0183f16d2:33217 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T03:51:06,510 DEBUG [55d0183f16d2:33217 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T03:51:06,510 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=55d0183f16d2,41463,1733716265826 2024-12-09T03:51:06,512 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 55d0183f16d2,41463,1733716265826, state=OPENING 2024-12-09T03:51:06,514 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T03:51:06,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:06,516 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:51:06,516 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:06,516 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:06,516 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:06,516 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=55d0183f16d2,41463,1733716265826}] 2024-12-09T03:51:06,516 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:06,670 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:51:06,672 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60263, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:51:06,677 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T03:51:06,677 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:51:06,680 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=55d0183f16d2%2C41463%2C1733716265826.meta, suffix=.meta, logDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,41463,1733716265826, archiveDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/oldWALs, maxLogs=32 2024-12-09T03:51:06,681 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 55d0183f16d2%2C41463%2C1733716265826.meta.1733716266680.meta 2024-12-09T03:51:06,689 INFO 
[RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/WALs/55d0183f16d2,41463,1733716265826/55d0183f16d2%2C41463%2C1733716265826.meta.1733716266680.meta 2024-12-09T03:51:06,689 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33449:33449),(127.0.0.1/127.0.0.1:36817:36817),(127.0.0.1/127.0.0.1:46091:46091)] 2024-12-09T03:51:06,690 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:51:06,690 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T03:51:06,690 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T03:51:06,691 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T03:51:06,691 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T03:51:06,691 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:06,691 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T03:51:06,691 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T03:51:06,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:51:06,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:51:06,696 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:06,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:51:06,697 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:51:06,697 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:06,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:51:06,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:51:06,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
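Editor's note: every store opened above instantiates DefaultStoreFileTracker through StoreFileTrackerFactory. The tracker implementation is selectable; the sketch below uses the key name I recall from the store-file-tracking feature (HBASE-26067), so treat both the key and the accepted values as assumptions to verify against your release:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TrackerSelection {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "DEFAULT" keeps the DefaultStoreFileTracker seen in the log; "FILE"
    // would switch to the file-based tracker that avoids renames on S3-like storage.
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println(conf.get("hbase.store.file-tracker.impl"));
  }
}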
2024-12-09T03:51:06,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:51:06,701 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:51:06,701 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:51:06,701 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:51:06,702 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740 2024-12-09T03:51:06,703 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740 2024-12-09T03:51:06,705 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:51:06,705 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:51:06,705 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
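Editor's note: the hbase:meta stores being opened above carry ROW_INDEX_V1 encoding, ROWCOL bloom filters, in-memory caching and small block sizes. A client-side sketch of a column family built with the same attributes, purely to show which builder calls those properties map to (the "info" name is just the example, this is not how meta itself is created):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamily {
  public static void main(String[] args) {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // encoding=ROW_INDEX_V1
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
        .build();
    System.out.println(cf);
  }
}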
2024-12-09T03:51:06,707 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:51:06,708 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59024240, jitterRate=-0.12047028541564941}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:51:06,708 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T03:51:06,709 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733716266691Writing region info on filesystem at 1733716266691Initializing all the Stores at 1733716266692 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716266692Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716266693 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716266693Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733716266693Cleaning up temporary data from old regions at 1733716266705 (+12 ms)Running coprocessor post-open hooks at 1733716266708 (+3 ms)Region opened successfully at 1733716266709 (+1 ms) 2024-12-09T03:51:06,711 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733716266670 2024-12-09T03:51:06,714 DEBUG [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T03:51:06,714 INFO [RS_OPEN_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T03:51:06,715 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=55d0183f16d2,41463,1733716265826 2024-12-09T03:51:06,717 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 55d0183f16d2,41463,1733716265826, state=OPEN 2024-12-09T03:51:06,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:51:06,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:51:06,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:51:06,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:51:06,720 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=55d0183f16d2,41463,1733716265826 2024-12-09T03:51:06,720 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:06,720 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:06,720 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:06,720 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:51:06,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T03:51:06,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=55d0183f16d2,41463,1733716265826 in 204 msec 2024-12-09T03:51:06,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T03:51:06,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 622 msec 2024-12-09T03:51:06,729 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:51:06,729 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T03:51:06,730 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:51:06,731 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=55d0183f16d2,41463,1733716265826, seqNum=-1] 2024-12-09T03:51:06,731 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:51:06,733 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38017, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:51:06,741 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 706 msec 2024-12-09T03:51:06,741 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733716266741, completionTime=-1 2024-12-09T03:51:06,741 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T03:51:06,741 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T03:51:06,744 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T03:51:06,744 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733716326744 2024-12-09T03:51:06,744 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733716386744 2024-12-09T03:51:06,744 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T03:51:06,744 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,33217,1733716265721-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,744 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,33217,1733716265721-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,745 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,33217,1733716265721-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,745 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-55d0183f16d2:33217, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,745 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T03:51:06,745 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
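The ZKWatcher entries a few lines back, reporting NodeDataChanged for /hbase/meta-region-server on every watcher, are plain ZooKeeper watch notifications. The sketch below watches the same znode with the stock ZooKeeper client; the quorum address is the one shown in the log, while the timeout and the latch-based flow are assumptions made for illustration.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MetaZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    CountDownLatch changed = new CountDownLatch(1);
    // The default watcher receives both session state changes and events for watched znodes.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63236", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      if (event.getType() == Watcher.Event.EventType.NodeDataChanged) {
        changed.countDown();
      }
    });
    connected.await();
    // Passing true registers the default watcher on this znode, similar to what ZKWatcher logs above.
    byte[] data = zk.getData("/hbase/meta-region-server", true, null);
    System.out.println("meta location znode currently holds " + data.length + " bytes");
    changed.await(); // released on the next NodeDataChanged for the path
    zk.close();
  }
}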
2024-12-09T03:51:06,747 DEBUG [master/55d0183f16d2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T03:51:06,750 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.888sec 2024-12-09T03:51:06,750 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T03:51:06,750 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T03:51:06,750 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T03:51:06,750 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T03:51:06,750 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T03:51:06,750 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,33217,1733716265721-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:51:06,750 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,33217,1733716265721-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T03:51:06,753 DEBUG [master/55d0183f16d2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T03:51:06,753 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T03:51:06,753 INFO [master/55d0183f16d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=55d0183f16d2,33217,1733716265721-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
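The ChoreService entries above register the master's periodic maintenance tasks (balancer, catalog janitor, MOB cleaners, and so on). ScheduledChore and ChoreService are HBase-internal classes, so the following is only a sketch of the scheduling pattern those entries describe, assuming the constructor shapes shown here.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable; in a real server the master or regionserver plays this role.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Name, stopper, period; the period is in milliseconds by default, matching the
    // "period=..., unit=MILLISECONDS" wording of the entries above.
    ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(chore);
    Thread.sleep(3000);
    stopper.stop("done");
    service.shutdown();
  }
}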
2024-12-09T03:51:06,845 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f519320, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:51:06,846 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 55d0183f16d2,33217,-1 for getting cluster id 2024-12-09T03:51:06,846 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T03:51:06,847 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '45c67599-91fe-4dca-a80c-c08c9d455854' 2024-12-09T03:51:06,848 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T03:51:06,848 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "45c67599-91fe-4dca-a80c-c08c9d455854" 2024-12-09T03:51:06,848 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66c67376, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:51:06,848 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [55d0183f16d2,33217,-1] 2024-12-09T03:51:06,849 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T03:51:06,849 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:06,850 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T03:51:06,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3515910a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:51:06,852 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:51:06,853 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=55d0183f16d2,41463,1733716265826, seqNum=-1] 2024-12-09T03:51:06,853 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:51:06,855 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38084, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:51:06,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=55d0183f16d2,33217,1733716265721 2024-12-09T03:51:06,858 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T03:51:06,859 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 55d0183f16d2,33217,1733716265721 2024-12-09T03:51:06,859 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@53080dd7 2024-12-09T03:51:06,859 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T03:51:06,860 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T03:51:06,861 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:51:06,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T03:51:06,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T03:51:06,865 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:06,865 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T03:51:06,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:51:06,867 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T03:51:06,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741837_1013 (size=392) 2024-12-09T03:51:06,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741837_1013 (size=392) 2024-12-09T03:51:06,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741837_1013 (size=392) 2024-12-09T03:51:06,879 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 850ab1a436d32737e2568380ca0a08a4, NAME => 'TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569 2024-12-09T03:51:06,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741838_1014 (size=51) 2024-12-09T03:51:06,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741838_1014 (size=51) 2024-12-09T03:51:06,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741838_1014 (size=51) 2024-12-09T03:51:06,889 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:06,889 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 850ab1a436d32737e2568380ca0a08a4, disabling compactions & flushes 2024-12-09T03:51:06,889 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:06,889 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:06,889 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. after waiting 0 ms 2024-12-09T03:51:06,889 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:06,889 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:06,889 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 850ab1a436d32737e2568380ca0a08a4: Waiting for close lock at 1733716266889Disabling compacts and flushes for region at 1733716266889Disabling writes for close at 1733716266889Writing region close event to WAL at 1733716266889Closed at 1733716266889 2024-12-09T03:51:06,892 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T03:51:06,892 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733716266892"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733716266892"}]},"ts":"1733716266892"} 2024-12-09T03:51:06,895 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
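The CreateTableProcedure above (pid=4) is the server-side half of a normal admin request. The client side corresponds roughly to the call below; this is a sketch only, the connection setup is assumed, and the descriptor simply mirrors the REGION_REPLICATION => '1' and 'cf' family printed in the request.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      admin.createTable(desc); // drives a CreateTableProcedure like pid=4 above
    }
  }
}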
2024-12-09T03:51:06,896 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T03:51:06,896 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733716266896"}]},"ts":"1733716266896"} 2024-12-09T03:51:06,899 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T03:51:06,899 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {55d0183f16d2=0} racks are {/default-rack=0} 2024-12-09T03:51:06,900 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T03:51:06,900 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T03:51:06,900 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T03:51:06,900 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T03:51:06,900 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T03:51:06,900 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T03:51:06,900 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T03:51:06,900 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T03:51:06,900 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T03:51:06,900 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T03:51:06,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=850ab1a436d32737e2568380ca0a08a4, ASSIGN}] 2024-12-09T03:51:06,902 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=850ab1a436d32737e2568380ca0a08a4, ASSIGN 2024-12-09T03:51:06,904 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=850ab1a436d32737e2568380ca0a08a4, ASSIGN; state=OFFLINE, location=55d0183f16d2,41463,1733716265826; forceNewPlan=false, retain=false 2024-12-09T03:51:06,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:51:07,054 INFO [55d0183f16d2:33217 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-09T03:51:07,055 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=850ab1a436d32737e2568380ca0a08a4, regionState=OPENING, regionLocation=55d0183f16d2,41463,1733716265826 2024-12-09T03:51:07,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=850ab1a436d32737e2568380ca0a08a4, ASSIGN because future has completed 2024-12-09T03:51:07,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 850ab1a436d32737e2568380ca0a08a4, server=55d0183f16d2,41463,1733716265826}] 2024-12-09T03:51:07,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:51:07,218 INFO [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:07,218 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 850ab1a436d32737e2568380ca0a08a4, NAME => 'TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:51:07,218 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,218 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:51:07,218 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,218 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,220 INFO [StoreOpener-850ab1a436d32737e2568380ca0a08a4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,222 INFO [StoreOpener-850ab1a436d32737e2568380ca0a08a4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 850ab1a436d32737e2568380ca0a08a4 columnFamilyName cf 2024-12-09T03:51:07,222 DEBUG [StoreOpener-850ab1a436d32737e2568380ca0a08a4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:51:07,222 INFO [StoreOpener-850ab1a436d32737e2568380ca0a08a4-1 {}] regionserver.HStore(327): Store=850ab1a436d32737e2568380ca0a08a4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:51:07,222 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,223 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,224 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,224 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,224 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,226 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,228 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:51:07,229 INFO [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 850ab1a436d32737e2568380ca0a08a4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63034345, jitterRate=-0.06071506440639496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T03:51:07,229 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,229 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 850ab1a436d32737e2568380ca0a08a4: Running coprocessor pre-open hook at 1733716267219Writing region info on filesystem at 1733716267219Initializing all the Stores at 1733716267220 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733716267220Cleaning up temporary data from old regions at 1733716267224 (+4 ms)Running coprocessor post-open hooks at 1733716267229 (+5 ms)Region opened successfully at 1733716267229 2024-12-09T03:51:07,231 INFO [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4., pid=6, masterSystemTime=1733716267213 2024-12-09T03:51:07,234 DEBUG [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:07,234 INFO [RS_OPEN_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:07,235 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=850ab1a436d32737e2568380ca0a08a4, regionState=OPEN, openSeqNum=2, regionLocation=55d0183f16d2,41463,1733716265826 2024-12-09T03:51:07,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 850ab1a436d32737e2568380ca0a08a4, server=55d0183f16d2,41463,1733716265826 because future has completed 2024-12-09T03:51:07,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T03:51:07,245 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 850ab1a436d32737e2568380ca0a08a4, server=55d0183f16d2,41463,1733716265826 in 181 msec 2024-12-09T03:51:07,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T03:51:07,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=850ab1a436d32737e2568380ca0a08a4, ASSIGN in 344 msec 2024-12-09T03:51:07,249 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T03:51:07,250 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733716267249"}]},"ts":"1733716267249"} 2024-12-09T03:51:07,252 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T03:51:07,254 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T03:51:07,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 393 msec 2024-12-09T03:51:07,498 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:51:07,499 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T03:51:07,499 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T03:51:07,499 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T03:51:07,503 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T03:51:07,503 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T03:51:07,503 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-09T03:51:07,506 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4., hostname=55d0183f16d2,41463,1733716265826, seqNum=2] 2024-12-09T03:51:07,510 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T03:51:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T03:51:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T03:51:07,514 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T03:51:07,515 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T03:51:07,515 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T03:51:07,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T03:51:07,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41463 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T03:51:07,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 
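The FlushTableProcedure (pid=7) and the FlushRegionCallable dispatched to the region server above are what an explicit client-side flush produces. A sketch of the client sequence that would generate this follows; the table, family, and qualifier names are taken from the log, the connection setup and the cell value are assumed.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  public static void main(String[] args) throws IOException {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(name);
         Admin admin = connection.getAdmin()) {
      // One small cell; the flush entries below show its key as row/cf:cq.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the FlushTableProcedure -> FlushRegionProcedure chain logged here.
      admin.flush(name);
    }
  }
}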
2024-12-09T03:51:07,671 INFO [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 850ab1a436d32737e2568380ca0a08a4 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T03:51:07,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4/.tmp/cf/fd90f12680db4c2292144340e76de7f3 is 36, key is row/cf:cq/1733716267508/Put/seqid=0 2024-12-09T03:51:07,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741839_1015 (size=4787) 2024-12-09T03:51:07,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741839_1015 (size=4787) 2024-12-09T03:51:07,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741839_1015 (size=4787) 2024-12-09T03:51:07,698 INFO [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4/.tmp/cf/fd90f12680db4c2292144340e76de7f3 2024-12-09T03:51:07,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4/.tmp/cf/fd90f12680db4c2292144340e76de7f3 as hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4/cf/fd90f12680db4c2292144340e76de7f3 2024-12-09T03:51:07,715 INFO [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4/cf/fd90f12680db4c2292144340e76de7f3, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T03:51:07,717 INFO [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 850ab1a436d32737e2568380ca0a08a4 in 45ms, sequenceid=5, compaction requested=false 2024-12-09T03:51:07,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 850ab1a436d32737e2568380ca0a08a4: 2024-12-09T03:51:07,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 
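The three addStoredBlock reports above show the flushed store file's block (blk_1073741839) acknowledged by three datanodes of the test HDFS at localhost:33743. A generic way to inspect where such a file's blocks landed is the standard Hadoop FileSystem API; the path below is copied from the flush entries, the rest is a sketch and assumes the mini-cluster is still running.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StoreFileBlockLocationsSketch {
  public static void main(String[] args) throws Exception {
    // Store file path taken verbatim from the HStore$StoreFlusherImpl entry above.
    Path hfile = new Path("hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/"
        + "data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4/cf/fd90f12680db4c2292144340e76de7f3");
    FileSystem fs = hfile.getFileSystem(new Configuration());
    FileStatus status = fs.getFileStatus(hfile);
    for (BlockLocation loc : fs.getFileBlockLocations(status, 0, status.getLen())) {
      // Each location corresponds to the datanodes seen in the addStoredBlock reports.
      System.out.println(loc.getOffset() + " -> " + String.join(",", loc.getHosts()));
    }
    fs.close();
  }
}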
2024-12-09T03:51:07,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/55d0183f16d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T03:51:07,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T03:51:07,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T03:51:07,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 204 msec 2024-12-09T03:51:07,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 215 msec 2024-12-09T03:51:07,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T03:51:07,829 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T03:51:07,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T03:51:07,833 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T03:51:07,833 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:07,834 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:07,834 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:07,834 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T03:51:07,834 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T03:51:07,834 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1758630675, stopped=false 2024-12-09T03:51:07,834 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=55d0183f16d2,33217,1733716265721 2024-12-09T03:51:07,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:07,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:07,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:07,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:51:07,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:07,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, 
quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:07,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:07,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:07,836 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:51:07,837 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T03:51:07,837 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:07,837 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:07,837 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '55d0183f16d2,37651,1733716265767' ***** 2024-12-09T03:51:07,837 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:51:07,837 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '55d0183f16d2,45049,1733716265798' ***** 2024-12-09T03:51:07,837 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:07,837 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:51:07,837 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:07,837 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '55d0183f16d2,41463,1733716265826' ***** 2024-12-09T03:51:07,837 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:51:07,837 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:51:07,838 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:07,838 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:51:07,838 INFO [RS:0;55d0183f16d2:37651 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:51:07,838 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:51:07,838 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:51:07,838 INFO [RS:1;55d0183f16d2:45049 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-09T03:51:07,838 INFO [RS:1;55d0183f16d2:45049 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:51:07,838 INFO [RS:0;55d0183f16d2:37651 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:51:07,838 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:51:07,838 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(959): stopping server 55d0183f16d2,45049,1733716265798 2024-12-09T03:51:07,839 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(959): stopping server 55d0183f16d2,37651,1733716265767 2024-12-09T03:51:07,839 INFO [RS:1;55d0183f16d2:45049 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:51:07,839 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:51:07,839 INFO [RS:0;55d0183f16d2:37651 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:51:07,839 INFO [RS:2;55d0183f16d2:41463 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:51:07,839 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:51:07,839 INFO [RS:1;55d0183f16d2:45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;55d0183f16d2:45049. 2024-12-09T03:51:07,839 DEBUG [RS:1;55d0183f16d2:45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:07,839 DEBUG [RS:1;55d0183f16d2:45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:07,839 INFO [RS:0;55d0183f16d2:37651 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;55d0183f16d2:37651. 2024-12-09T03:51:07,839 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(976): stopping server 55d0183f16d2,45049,1733716265798; all regions closed. 
2024-12-09T03:51:07,839 DEBUG [RS:0;55d0183f16d2:37651 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:07,839 DEBUG [RS:0;55d0183f16d2:37651 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:07,840 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(976): stopping server 55d0183f16d2,37651,1733716265767; all regions closed. 2024-12-09T03:51:07,840 INFO [RS:2;55d0183f16d2:41463 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:51:07,840 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(3091): Received CLOSE for 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,840 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,840 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,840 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,840 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,840 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,840 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(959): stopping server 55d0183f16d2,41463,1733716265826 2024-12-09T03:51:07,840 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,840 INFO [RS:2;55d0183f16d2:41463 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:51:07,840 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,840 INFO [RS:2;55d0183f16d2:41463 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;55d0183f16d2:41463. 
2024-12-09T03:51:07,840 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,841 DEBUG [RS:2;55d0183f16d2:41463 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:51:07,841 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,841 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:07,841 DEBUG [RS:2;55d0183f16d2:41463 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:07,841 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:51:07,841 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:51:07,841 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:51:07,841 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T03:51:07,841 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 850ab1a436d32737e2568380ca0a08a4, disabling compactions & flushes 2024-12-09T03:51:07,841 INFO [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:07,842 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:07,842 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. after waiting 0 ms 2024-12-09T03:51:07,842 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 
2024-12-09T03:51:07,842 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T03:51:07,842 DEBUG [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(1325): Online Regions={850ab1a436d32737e2568380ca0a08a4=TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T03:51:07,842 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:51:07,842 DEBUG [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 850ab1a436d32737e2568380ca0a08a4 2024-12-09T03:51:07,842 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:51:07,842 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:51:07,843 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:51:07,843 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:51:07,843 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T03:51:07,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741835_1011 (size=93) 2024-12-09T03:51:07,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741835_1011 (size=93) 2024-12-09T03:51:07,848 DEBUG [RS:0;55d0183f16d2:37651 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/oldWALs 2024-12-09T03:51:07,848 INFO [RS:0;55d0183f16d2:37651 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 55d0183f16d2%2C37651%2C1733716265767:(num 1733716266251) 2024-12-09T03:51:07,848 DEBUG [RS:0;55d0183f16d2:37651 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:07,848 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:07,848 INFO [RS:0;55d0183f16d2:37651 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:51:07,848 INFO [RS:0;55d0183f16d2:37651 {}] hbase.ChoreService(370): Chore service for: regionserver/55d0183f16d2:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:51:07,849 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:51:07,849 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:51:07,849 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T03:51:07,849 INFO [RS:0;55d0183f16d2:37651 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:51:07,849 INFO [RS:0;55d0183f16d2:37651 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37651 2024-12-09T03:51:07,849 INFO [regionserver/55d0183f16d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:51:07,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741835_1011 (size=93) 2024-12-09T03:51:07,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741834_1010 (size=93) 2024-12-09T03:51:07,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741834_1010 (size=93) 2024-12-09T03:51:07,850 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/default/TestHBaseWalOnEC/850ab1a436d32737e2568380ca0a08a4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T03:51:07,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:51:07,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741834_1010 (size=93) 2024-12-09T03:51:07,851 INFO [RS:0;55d0183f16d2:37651 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:51:07,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/55d0183f16d2,37651,1733716265767 2024-12-09T03:51:07,852 INFO [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 2024-12-09T03:51:07,852 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 850ab1a436d32737e2568380ca0a08a4: Waiting for close lock at 1733716267841Running coprocessor pre-close hooks at 1733716267841Disabling compacts and flushes for region at 1733716267841Disabling writes for close at 1733716267842 (+1 ms)Writing region close event to WAL at 1733716267843 (+1 ms)Running coprocessor post-close hooks at 1733716267851 (+8 ms)Closed at 1733716267852 (+1 ms) 2024-12-09T03:51:07,852 DEBUG [RS_CLOSE_REGION-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4. 
2024-12-09T03:51:07,854 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [55d0183f16d2,37651,1733716265767] 2024-12-09T03:51:07,855 DEBUG [RS:1;55d0183f16d2:45049 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/oldWALs 2024-12-09T03:51:07,855 INFO [RS:1;55d0183f16d2:45049 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 55d0183f16d2%2C45049%2C1733716265798:(num 1733716266250) 2024-12-09T03:51:07,855 DEBUG [RS:1;55d0183f16d2:45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:07,855 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:07,855 INFO [RS:1;55d0183f16d2:45049 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:51:07,856 INFO [RS:1;55d0183f16d2:45049 {}] hbase.ChoreService(370): Chore service for: regionserver/55d0183f16d2:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:51:07,856 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:51:07,856 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:51:07,856 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:51:07,856 INFO [regionserver/55d0183f16d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:51:07,856 INFO [RS:1;55d0183f16d2:45049 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:51:07,856 INFO [RS:1;55d0183f16d2:45049 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45049 2024-12-09T03:51:07,857 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/55d0183f16d2,37651,1733716265767 already deleted, retry=false 2024-12-09T03:51:07,857 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 55d0183f16d2,37651,1733716265767 expired; onlineServers=2 2024-12-09T03:51:07,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/55d0183f16d2,45049,1733716265798 2024-12-09T03:51:07,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:51:07,857 INFO [RS:1;55d0183f16d2:45049 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:51:07,859 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [55d0183f16d2,45049,1733716265798] 2024-12-09T03:51:07,860 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/55d0183f16d2,45049,1733716265798 already deleted, retry=false 2024-12-09T03:51:07,860 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 55d0183f16d2,45049,1733716265798 expired; onlineServers=1 2024-12-09T03:51:07,873 DEBUG 
[RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/.tmp/info/428ec8ffb7f049a4b63c4e040b103ebe is 153, key is TestHBaseWalOnEC,,1733716266861.850ab1a436d32737e2568380ca0a08a4./info:regioninfo/1733716267235/Put/seqid=0 2024-12-09T03:51:07,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741840_1016 (size=6637) 2024-12-09T03:51:07,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741840_1016 (size=6637) 2024-12-09T03:51:07,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741840_1016 (size=6637) 2024-12-09T03:51:07,883 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/.tmp/info/428ec8ffb7f049a4b63c4e040b103ebe 2024-12-09T03:51:07,907 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/.tmp/ns/0622e02d96b444f3a2e887af6b1d37f5 is 43, key is default/ns:d/1733716266733/Put/seqid=0 2024-12-09T03:51:07,914 INFO [regionserver/55d0183f16d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:07,914 INFO [regionserver/55d0183f16d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:07,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741841_1017 (size=5153) 2024-12-09T03:51:07,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741841_1017 (size=5153) 2024-12-09T03:51:07,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741841_1017 (size=5153) 2024-12-09T03:51:07,915 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/.tmp/ns/0622e02d96b444f3a2e887af6b1d37f5 2024-12-09T03:51:07,916 INFO [regionserver/55d0183f16d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:07,937 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:51:07,939 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/.tmp/table/4689f3f58d7349ad8efcb8e1ef93858b is 52, key is TestHBaseWalOnEC/table:state/1733716267249/Put/seqid=0 2024-12-09T03:51:07,944 WARN [HBase-Metrics2-1 {}] 
impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:51:07,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741842_1018 (size=5249) 2024-12-09T03:51:07,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741842_1018 (size=5249) 2024-12-09T03:51:07,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741842_1018 (size=5249) 2024-12-09T03:51:07,952 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/.tmp/table/4689f3f58d7349ad8efcb8e1ef93858b 2024-12-09T03:51:07,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:07,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37651-0x1019558b5750001, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:07,954 INFO [RS:0;55d0183f16d2:37651 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:51:07,954 INFO [RS:0;55d0183f16d2:37651 {}] regionserver.HRegionServer(1031): Exiting; stopping=55d0183f16d2,37651,1733716265767; zookeeper connection closed. 2024-12-09T03:51:07,954 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@70e02e9f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@70e02e9f 2024-12-09T03:51:07,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:07,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45049-0x1019558b5750002, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:07,959 INFO [RS:1;55d0183f16d2:45049 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:51:07,959 INFO [RS:1;55d0183f16d2:45049 {}] regionserver.HRegionServer(1031): Exiting; stopping=55d0183f16d2,45049,1733716265798; zookeeper connection closed. 
2024-12-09T03:51:07,959 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@53e171c0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@53e171c0 2024-12-09T03:51:07,962 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/.tmp/info/428ec8ffb7f049a4b63c4e040b103ebe as hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/info/428ec8ffb7f049a4b63c4e040b103ebe 2024-12-09T03:51:07,969 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/info/428ec8ffb7f049a4b63c4e040b103ebe, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T03:51:07,970 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/.tmp/ns/0622e02d96b444f3a2e887af6b1d37f5 as hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/ns/0622e02d96b444f3a2e887af6b1d37f5 2024-12-09T03:51:07,977 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/ns/0622e02d96b444f3a2e887af6b1d37f5, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T03:51:07,978 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/.tmp/table/4689f3f58d7349ad8efcb8e1ef93858b as hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/table/4689f3f58d7349ad8efcb8e1ef93858b 2024-12-09T03:51:07,986 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/table/4689f3f58d7349ad8efcb8e1ef93858b, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T03:51:07,987 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false 2024-12-09T03:51:07,997 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T03:51:07,998 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:51:07,998 INFO [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:51:07,999 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733716267842Running coprocessor pre-close hooks at 1733716267842Disabling compacts and flushes for region at 1733716267842Disabling writes for close at 1733716267843 (+1 ms)Obtaining lock to block concurrent updates at 1733716267843Preparing flush snapshotting stores in 1588230740 at 1733716267843Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733716267843Flushing stores of hbase:meta,,1.1588230740 at 1733716267846 (+3 ms)Flushing 1588230740/info: creating writer at 1733716267846Flushing 1588230740/info: appending metadata at 1733716267872 (+26 ms)Flushing 1588230740/info: closing flushed file at 1733716267873 (+1 ms)Flushing 1588230740/ns: creating writer at 1733716267891 (+18 ms)Flushing 1588230740/ns: appending metadata at 1733716267907 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733716267907Flushing 1588230740/table: creating writer at 1733716267923 (+16 ms)Flushing 1588230740/table: appending metadata at 1733716267939 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733716267939Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16750dd2: reopening flushed file at 1733716267961 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cfef5b6: reopening flushed file at 1733716267970 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@516160e1: reopening flushed file at 1733716267977 (+7 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false at 1733716267987 (+10 ms)Writing region close event to WAL at 1733716267993 (+6 ms)Running coprocessor post-close hooks at 1733716267998 (+5 ms)Closed at 1733716267998 2024-12-09T03:51:07,999 DEBUG [RS_CLOSE_META-regionserver/55d0183f16d2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T03:51:08,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:51:08,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:51:08,043 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(976): stopping server 55d0183f16d2,41463,1733716265826; all regions closed. 
2024-12-09T03:51:08,043 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,044 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,044 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741836_1012 (size=2751) 2024-12-09T03:51:08,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741836_1012 (size=2751) 2024-12-09T03:51:08,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741836_1012 (size=2751) 2024-12-09T03:51:08,054 DEBUG [RS:2;55d0183f16d2:41463 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/oldWALs 2024-12-09T03:51:08,054 INFO [RS:2;55d0183f16d2:41463 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 55d0183f16d2%2C41463%2C1733716265826.meta:.meta(num 1733716266680) 2024-12-09T03:51:08,054 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,055 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,055 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,055 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,055 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741833_1009 (size=1298) 2024-12-09T03:51:08,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741833_1009 (size=1298) 2024-12-09T03:51:08,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741833_1009 (size=1298) 2024-12-09T03:51:08,061 DEBUG [RS:2;55d0183f16d2:41463 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/oldWALs 2024-12-09T03:51:08,061 INFO [RS:2;55d0183f16d2:41463 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 55d0183f16d2%2C41463%2C1733716265826:(num 1733716266249) 2024-12-09T03:51:08,061 DEBUG [RS:2;55d0183f16d2:41463 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:51:08,061 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:51:08,061 INFO [RS:2;55d0183f16d2:41463 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:51:08,062 INFO [RS:2;55d0183f16d2:41463 {}] hbase.ChoreService(370): Chore service for: regionserver/55d0183f16d2:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T03:51:08,062 INFO [RS:2;55d0183f16d2:41463 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:51:08,062 INFO [regionserver/55d0183f16d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T03:51:08,062 INFO [RS:2;55d0183f16d2:41463 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41463 2024-12-09T03:51:08,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/55d0183f16d2,41463,1733716265826 2024-12-09T03:51:08,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:51:08,065 INFO [RS:2;55d0183f16d2:41463 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:51:08,067 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [55d0183f16d2,41463,1733716265826] 2024-12-09T03:51:08,068 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/55d0183f16d2,41463,1733716265826 already deleted, retry=false 2024-12-09T03:51:08,068 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 55d0183f16d2,41463,1733716265826 expired; onlineServers=0 2024-12-09T03:51:08,068 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '55d0183f16d2,33217,1733716265721' ***** 2024-12-09T03:51:08,068 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T03:51:08,068 INFO [M:0;55d0183f16d2:33217 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:51:08,069 INFO [M:0;55d0183f16d2:33217 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:51:08,069 DEBUG [M:0;55d0183f16d2:33217 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T03:51:08,069 DEBUG [M:0;55d0183f16d2:33217 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T03:51:08,069 DEBUG [master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.small.0-1733716266040 {}] cleaner.HFileCleaner(306): Exit Thread[master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.small.0-1733716266040,5,FailOnTimeoutGroup] 2024-12-09T03:51:08,069 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T03:51:08,069 DEBUG [master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.large.0-1733716266039 {}] cleaner.HFileCleaner(306): Exit Thread[master/55d0183f16d2:0:becomeActiveMaster-HFileCleaner.large.0-1733716266039,5,FailOnTimeoutGroup] 2024-12-09T03:51:08,069 INFO [M:0;55d0183f16d2:33217 {}] hbase.ChoreService(370): Chore service for: master/55d0183f16d2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T03:51:08,069 INFO [M:0;55d0183f16d2:33217 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:51:08,069 DEBUG [M:0;55d0183f16d2:33217 {}] master.HMaster(1795): Stopping service threads 2024-12-09T03:51:08,069 INFO [M:0;55d0183f16d2:33217 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T03:51:08,070 INFO [M:0;55d0183f16d2:33217 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:51:08,070 INFO [M:0;55d0183f16d2:33217 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T03:51:08,070 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T03:51:08,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T03:51:08,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:51:08,071 DEBUG [M:0;55d0183f16d2:33217 {}] zookeeper.ZKUtil(347): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T03:51:08,071 WARN [M:0;55d0183f16d2:33217 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T03:51:08,071 INFO [M:0;55d0183f16d2:33217 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/.lastflushedseqids 2024-12-09T03:51:08,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741843_1019 (size=127) 2024-12-09T03:51:08,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741843_1019 (size=127) 2024-12-09T03:51:08,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741843_1019 (size=127) 2024-12-09T03:51:08,081 INFO [M:0;55d0183f16d2:33217 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T03:51:08,081 INFO [M:0;55d0183f16d2:33217 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T03:51:08,082 DEBUG [M:0;55d0183f16d2:33217 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:51:08,082 INFO [M:0;55d0183f16d2:33217 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:08,082 DEBUG [M:0;55d0183f16d2:33217 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:08,082 DEBUG [M:0;55d0183f16d2:33217 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:51:08,082 DEBUG [M:0;55d0183f16d2:33217 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:08,082 INFO [M:0;55d0183f16d2:33217 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-12-09T03:51:08,108 DEBUG [M:0;55d0183f16d2:33217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0e2b57686f2d4e4181dfe2ec572c1108 is 82, key is hbase:meta,,1/info:regioninfo/1733716266715/Put/seqid=0 2024-12-09T03:51:08,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741844_1020 (size=5672) 2024-12-09T03:51:08,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741844_1020 (size=5672) 2024-12-09T03:51:08,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741844_1020 (size=5672) 2024-12-09T03:51:08,117 INFO [M:0;55d0183f16d2:33217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0e2b57686f2d4e4181dfe2ec572c1108 2024-12-09T03:51:08,141 DEBUG [M:0;55d0183f16d2:33217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2f9578a94b174c278b6646a8bf75d12d is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733716267255/Put/seqid=0 2024-12-09T03:51:08,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741845_1021 (size=6441) 2024-12-09T03:51:08,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741845_1021 (size=6441) 2024-12-09T03:51:08,187 INFO [RS:2;55d0183f16d2:41463 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:51:08,187 INFO [RS:2;55d0183f16d2:41463 {}] regionserver.HRegionServer(1031): Exiting; stopping=55d0183f16d2,41463,1733716265826; zookeeper connection closed. 
2024-12-09T03:51:08,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:08,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41463-0x1019558b5750003, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:08,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741845_1021 (size=6441) 2024-12-09T03:51:08,188 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7afecd67 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7afecd67 2024-12-09T03:51:08,188 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T03:51:08,189 INFO [M:0;55d0183f16d2:33217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2f9578a94b174c278b6646a8bf75d12d 2024-12-09T03:51:08,213 DEBUG [M:0;55d0183f16d2:33217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6069084b794f436595569d48622f14d6 is 69, key is 55d0183f16d2,37651,1733716265767/rs:state/1733716266076/Put/seqid=0 2024-12-09T03:51:08,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741846_1022 (size=5294) 2024-12-09T03:51:08,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741846_1022 (size=5294) 2024-12-09T03:51:08,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741846_1022 (size=5294) 2024-12-09T03:51:08,232 INFO [M:0;55d0183f16d2:33217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6069084b794f436595569d48622f14d6 2024-12-09T03:51:08,240 DEBUG [M:0;55d0183f16d2:33217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0e2b57686f2d4e4181dfe2ec572c1108 as hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0e2b57686f2d4e4181dfe2ec572c1108 2024-12-09T03:51:08,247 INFO [M:0;55d0183f16d2:33217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0e2b57686f2d4e4181dfe2ec572c1108, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T03:51:08,248 DEBUG [M:0;55d0183f16d2:33217 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2f9578a94b174c278b6646a8bf75d12d as hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2f9578a94b174c278b6646a8bf75d12d 2024-12-09T03:51:08,255 INFO [M:0;55d0183f16d2:33217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2f9578a94b174c278b6646a8bf75d12d, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T03:51:08,256 DEBUG [M:0;55d0183f16d2:33217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6069084b794f436595569d48622f14d6 as hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6069084b794f436595569d48622f14d6 2024-12-09T03:51:08,263 INFO [M:0;55d0183f16d2:33217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33743/user/jenkins/test-data/cc66a5d2-b834-8701-44c6-f7ffdac36569/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6069084b794f436595569d48622f14d6, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T03:51:08,265 INFO [M:0;55d0183f16d2:33217 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 183ms, sequenceid=72, compaction requested=false 2024-12-09T03:51:08,266 INFO [M:0;55d0183f16d2:33217 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:51:08,267 DEBUG [M:0;55d0183f16d2:33217 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733716268081Disabling compacts and flushes for region at 1733716268081Disabling writes for close at 1733716268082 (+1 ms)Obtaining lock to block concurrent updates at 1733716268082Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733716268082Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1733716268083 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733716268084 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733716268084Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733716268107 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733716268107Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733716268124 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733716268141 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733716268141Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733716268195 (+54 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733716268212 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733716268212Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3882d6ad: reopening flushed file at 1733716268239 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fe45592: reopening flushed file at 1733716268247 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24053428: reopening flushed file at 1733716268255 (+8 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 183ms, sequenceid=72, compaction requested=false at 1733716268265 (+10 ms)Writing region close event to WAL at 1733716268266 (+1 ms)Closed at 1733716268266 2024-12-09T03:51:08,267 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,267 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,267 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,267 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,267 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:51:08,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46383 is added to blk_1073741830_1006 (size=32695) 2024-12-09T03:51:08,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741830_1006 (size=32695) 2024-12-09T03:51:08,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44565 is added to blk_1073741830_1006 (size=32695) 2024-12-09T03:51:08,272 INFO [M:0;55d0183f16d2:33217 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T03:51:08,272 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T03:51:08,272 INFO [M:0;55d0183f16d2:33217 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33217 2024-12-09T03:51:08,272 INFO [M:0;55d0183f16d2:33217 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:51:08,375 INFO [M:0;55d0183f16d2:33217 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:51:08,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:08,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33217-0x1019558b5750000, quorum=127.0.0.1:63236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:51:08,378 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f50f857{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:51:08,378 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7eeef71e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:51:08,378 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:51:08,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40b03519{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:51:08,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44968fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.log.dir/,STOPPED} 2024-12-09T03:51:08,380 WARN [BP-2080191298-172.17.0.2-1733716264747 heartbeating to localhost/127.0.0.1:33743 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:51:08,380 WARN [BP-2080191298-172.17.0.2-1733716264747 heartbeating to localhost/127.0.0.1:33743 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2080191298-172.17.0.2-1733716264747 (Datanode Uuid 61eecd26-d523-4d59-b7ab-20f29198f80f) service to localhost/127.0.0.1:33743 2024-12-09T03:51:08,380 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:51:08,380 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:51:08,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data5/current/BP-2080191298-172.17.0.2-1733716264747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:08,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data6/current/BP-2080191298-172.17.0.2-1733716264747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:08,382 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:51:08,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46f4cd0a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:51:08,384 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@700b2317{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:51:08,384 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:51:08,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55cf3a01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:51:08,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b4148d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.log.dir/,STOPPED} 2024-12-09T03:51:08,385 WARN [BP-2080191298-172.17.0.2-1733716264747 heartbeating to localhost/127.0.0.1:33743 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:51:08,385 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:51:08,385 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:51:08,385 WARN [BP-2080191298-172.17.0.2-1733716264747 heartbeating to localhost/127.0.0.1:33743 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2080191298-172.17.0.2-1733716264747 (Datanode Uuid 67edc136-2845-40bd-a2c7-9d1a49595dc0) service to localhost/127.0.0.1:33743 2024-12-09T03:51:08,386 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data3/current/BP-2080191298-172.17.0.2-1733716264747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:08,386 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data4/current/BP-2080191298-172.17.0.2-1733716264747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:08,387 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:51:08,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18f854cf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:51:08,389 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70fdfe33{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:51:08,389 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:51:08,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72f96008{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:51:08,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c4ebd49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.log.dir/,STOPPED} 2024-12-09T03:51:08,390 WARN [BP-2080191298-172.17.0.2-1733716264747 heartbeating to localhost/127.0.0.1:33743 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:51:08,390 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:51:08,390 WARN [BP-2080191298-172.17.0.2-1733716264747 heartbeating to localhost/127.0.0.1:33743 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2080191298-172.17.0.2-1733716264747 (Datanode Uuid cd1defce-e3d3-4aae-b252-7232425fba6e) service to localhost/127.0.0.1:33743 2024-12-09T03:51:08,390 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:51:08,391 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data1/current/BP-2080191298-172.17.0.2-1733716264747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:08,391 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/cluster_0af88036-76a7-602a-8d9f-c49a80dfc068/data/data2/current/BP-2080191298-172.17.0.2-1733716264747 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:51:08,391 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:51:08,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15027254{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:51:08,398 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4293887f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:51:08,398 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:51:08,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e58533{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:51:08,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ad8d9de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf58fc72-8c6f-14ce-8b27-473b016e5081/hadoop.log.dir/,STOPPED} 2024-12-09T03:51:08,405 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T03:51:08,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T03:51:08,436 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=145 (was 88) - Thread LEAK? -, OpenFileDescriptor=521 (was 441) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=181 (was 153) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=11762 (was 11995)